//
// Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// AArch64 Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.

register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.

// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
//
// follow the C1 compiler in making registers
//
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// as regards Java usage. we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
//

// General Registers

reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Double Registers

// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// AArch64 has 32 floating-point registers. Each is 128 bits wide and
// can hold a vector of single or double precision floating-point
// values: 4 * 32 bit floats or 2 * 64 bit doubles. We currently only
// use the first float or double element of the vector.

// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.

  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));

// ----------------------------
// Special Registers
// ----------------------------

// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).

reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());


// Specify priority of register selection within phases of register
// allocation.  Highest priority is first.  A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry.  Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.

alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

alloc_class chunk2(RFLAGS);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non-special integer registers
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

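// n.b. the dynamic classes below choose between the _no_fp and
// _with_fp lists: when PreserveFramePointer is set the frame pointer
// R29 must be kept live, so the variant without R29 applies;
// otherwise R29 is made available to the allocator. The same holds
// for no_special_reg further down.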
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});

// Class for all non-special long integer registers
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non_special pointer registers
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Class for all float registers
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 64bit vector registers
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);

%}

//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
//        int_def  <name>         ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
//        #define  <name>   (<expression>)
//        // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//

// we follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. the first two have a low as well as a
// normal cost. huge cost appears to be a way of saying don't do
// something

definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
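
// n.b. these names are consumed by the ins_cost attributes of the
// instruct rules later in the file; a typical use has the following
// shape (an illustrative sketch only, not a rule quoted from this
// file):
//
//   instruct addI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
//     match(Set dst (AddI src1 src2));
//     ins_cost(INSN_COST);
//     ...
//   %}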


//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description

source_hpp %{

#include "gc/shared/cardTableModRefBS.hpp"

class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};

class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return 4 * NativeInstruction::instruction_size;
  }
};

  // graph traversal helpers

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
%}

source %{

  // Optimization of volatile gets and puts
  // --------------------------------------
  //
  // AArch64 has ldar<x> and stlr<x> instructions which we can safely
  // use to implement volatile reads and writes. For a volatile read
  // we simply need
  //
  //   ldar<x>
  //
  // and for a volatile write we need
  //
  //   stlr<x>
  //
  // Alternatively, we can implement them by pairing a normal
  // load/store with a memory barrier. For a volatile read we need
  //
  //   ldr<x>
  //   dmb ishld
  //
  // for a volatile write
  //
  //   dmb ish
  //   str<x>
  //   dmb ish
  //
  // We can also use ldaxr and stlxr to implement compare and swap
  // (CAS) sequences. These are normally translated to an instruction
  // sequence like the following
  //
  //   dmb      ish
  // retry:
  //   ldxr<x>   rval, raddr
  //   cmp       rval, rold
  //   b.ne done
  //   stlxr<x>  rval, rnew, raddr
  //   cbnz      rval, retry
  // done:
  //   cset      r0, eq
  //   dmb ishld
  //
  // Note that the exclusive store is already using an stlxr
  // instruction. That is required to ensure visibility to other
  // threads of the exclusive write (assuming it succeeds) before that
  // of any subsequent writes.
  //
  // The following instruction sequence is an improvement on the above
  //
  // retry:
  //   ldaxr<x>  rval, raddr
  //   cmp       rval, rold
  //   b.ne done
  //   stlxr<x>  rval, rnew, raddr
  //   cbnz      rval, retry
  // done:
  //   cset      r0, eq
  //
  // We don't need the leading dmb ish since the stlxr guarantees
  // visibility of prior writes in the case that the swap is
  // successful. Crucially we don't have to worry about the case where
  // the swap is not successful since no valid program should be
  // relying on visibility of prior changes by the attempting thread
  // in the case where the CAS fails.
  //
  // Similarly, we don't need the trailing dmb ishld if we substitute
  // an ldaxr instruction since that will provide all the guarantees we
  // require regarding observation of changes made by other threads
  // before any change to the CAS address observed by the load.
  //
  // In order to generate the desired instruction sequence we need to
  // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
  // writes or CAS operations and ii) do not occur through any other
  // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
  // sequences to the desired machine code sequences. Selection of the
  // alternative rules can be implemented by predicates which identify
  // the relevant node sequences.
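  //
  // As an illustrative sketch only (the operand and rule names here
  // are assumed, not quoted from the rule section further down), such
  // a predicated rule has the shape:
  //
  //   instruct loadI_acquire(iRegINoSp dst, memory mem)
  //   %{
  //     predicate(needs_acquiring_load(n));
  //     match(Set dst (LoadI mem));
  //     // emit ldarw rather than ldrw followed by dmb ishld
  //     ...
  //   %}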
  //
  // The ideal graph generator translates a volatile read to the node
  // sequence
  //
  //   LoadX[mo_acquire]
  //   MemBarAcquire
  //
  // As a special case when using the compressed oops optimization we
  // may also see this variant
  //
  //   LoadN[mo_acquire]
  //   DecodeN
  //   MemBarAcquire
  //
  // A volatile write is translated to the node sequence
  //
  //   MemBarRelease
  //   StoreX[mo_release] {CardMark}-optional
  //   MemBarVolatile
  //
  // n.b. the above node patterns are generated with a strict
  // 'signature' configuration of input and output dependencies (see
  // the predicates below for exact details). The card mark may be as
  // simple as a few extra nodes or, in a few GC configurations, may
  // include more complex control flow between the leading and
  // trailing memory barriers. However, whatever the card mark
  // configuration these signatures are unique to translated volatile
  // reads/stores -- they will not appear as a result of any other
  // bytecode translation or inlining nor as a consequence of
  // optimizing transforms.
  //
  // We also want to catch inlined unsafe volatile gets and puts and
  // be able to implement them using either ldar<x>/stlr<x> or some
  // combination of ldr<x>/stlr<x> and dmb instructions.
  //
  // Inlined unsafe volatile puts manifest as a minor variant of the
  // normal volatile put node sequence containing an extra cpuorder
  // membar
  //
  //   MemBarRelease
  //   MemBarCPUOrder
  //   StoreX[mo_release] {CardMark}-optional
  //   MemBarVolatile
  //
  // n.b. as an aside, the cpuorder membar is not itself subject to
  // matching and translation by adlc rules.  However, the rule
  // predicates need to detect its presence in order to correctly
  // select the desired adlc rules.
  //
  // Inlined unsafe volatile gets manifest as a somewhat different
  // node sequence to a normal volatile get
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // In this case the acquire membar does not directly depend on the
  // load. However, we can be sure that the load is generated from an
  // inlined unsafe volatile get if we see it dependent on this unique
  // sequence of membar nodes. Similarly, given an acquire membar we
  // can know that it was added because of an inlined unsafe volatile
  // get if it is fed and feeds a cpuorder membar and if its feed
  // membar also feeds an acquiring load.
  //
  // Finally an inlined (Unsafe) CAS operation is translated to the
  // following ideal graph
  //
  //   MemBarRelease
  //   MemBarCPUOrder
  //   CompareAndSwapX {CardMark}-optional
  //   MemBarCPUOrder
  //   MemBarAcquire
  //
  // So, where we can identify these volatile read and write
  // signatures we can choose to plant either of the above two code
  // sequences. For a volatile read we can simply plant a normal
  // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
  // also choose to inhibit translation of the MemBarAcquire and
  // inhibit planting of the ldr<x>, instead planting an ldar<x>.
  //
  // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
  // normal str<x> and then a dmb ish for the MemBarVolatile.
  // Alternatively, we can inhibit translation of the MemBarRelease
  // and MemBarVolatile and instead plant a simple stlr<x>
  // instruction.
  //
  // when we recognise a CAS signature we can choose to plant a dmb
  // ish as a translation for the MemBarRelease, the conventional
  // macro-instruction sequence for the CompareAndSwap node (which
  // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
  // Alternatively, we can elide generation of the dmb instructions
  // and plant the alternative CompareAndSwap macro-instruction
  // sequence (which uses ldaxr<x>).
  //
  // Of course, the above only applies when we see these signature
  // configurations. We still want to plant dmb instructions in any
  // other cases where we may see a MemBarAcquire, MemBarRelease or
  // MemBarVolatile. For example, at the end of a constructor which
  // writes final/volatile fields we will see a MemBarRelease
  // instruction and this needs a 'dmb ish' lest we risk the
  // constructed object being visible without making the
  // final/volatile field writes visible.
  //
  // n.b. the translation rules below which rely on detection of the
  // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
  // If we see anything other than the signature configurations we
  // always just translate the loads and stores to ldr<x> and str<x>
  // and translate acquire, release and volatile membars to the
  // relevant dmb instructions.
  //
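  // n.b. the three signatures above correspond, respectively, to a
  // read of a volatile field, a write to a volatile field and an
  // inlined Unsafe compareAndSwap{Int,Long,Object} call.
  //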

  // graph traversal helpers used for volatile put/get and CAS
  // optimization

  // 1) general purpose helpers

  // if node n is linked to a parent MemBarNode by an intervening
  // Control and Memory ProjNode return the MemBarNode otherwise return
  // NULL.
  //
  // n may only be a Load or a MemBar.

  MemBarNode *parent_membar(const Node *n)
  {
    Node *ctl = NULL;
    Node *mem = NULL;
    Node *membar = NULL;

    if (n->is_Load()) {
      ctl = n->lookup(LoadNode::Control);
      mem = n->lookup(LoadNode::Memory);
    } else if (n->is_MemBar()) {
      ctl = n->lookup(TypeFunc::Control);
      mem = n->lookup(TypeFunc::Memory);
    } else {
      return NULL;
    }

    if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
      return NULL;
    }

    membar = ctl->lookup(0);

    if (!membar || !membar->is_MemBar()) {
      return NULL;
    }

    if (mem->lookup(0) != membar) {
      return NULL;
    }

    return membar->as_MemBar();
  }
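
  // n.b. applied to the LoadX[mo_acquire] of the inlined unsafe
  // volatile get shape shown above, parent_membar returns the
  // MemBarCPUOrder feeding the load, and NULL when the load is not
  // wired to a membar via Control and Memory projections.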

  // if n is linked to a child MemBarNode by intervening Control and
  // Memory ProjNodes return the MemBarNode otherwise return NULL.

  MemBarNode *child_membar(const MemBarNode *n)
  {
    ProjNode *ctl = n->proj_out(TypeFunc::Control);
    ProjNode *mem = n->proj_out(TypeFunc::Memory);

    // MemBar needs to have both a Ctl and Mem projection
    if (! ctl || ! mem)
      return NULL;

    MemBarNode *child = NULL;
    Node *x;

    for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
      x = ctl->fast_out(i);
      // if we see a membar we keep hold of it. we may also see a new
      // arena copy of the original but it will appear later
      if (x->is_MemBar()) {
        child = x->as_MemBar();
        break;
      }
    }

    if (child == NULL) {
      return NULL;
    }

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see a membar we keep hold of it. we may also see a new
      // arena copy of the original but it will appear later
      if (x == child) {
        return child;
      }
    }
    return NULL;
  }
  // helper predicate used to filter candidates for a leading memory
  // barrier
  //
  // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
  // whose Ctl and Mem feeds come from a MemBarRelease otherwise false

  bool leading_membar(const MemBarNode *barrier)
  {
    int opcode = barrier->Opcode();
    // if this is a release membar we are ok
    if (opcode == Op_MemBarRelease) {
      return true;
    }
    // if it's a cpuorder membar . . .
    if (opcode != Op_MemBarCPUOrder) {
      return false;
    }
    // then the parent has to be a release membar
    MemBarNode *parent = parent_membar(barrier);
    if (!parent) {
      return false;
    }
    opcode = parent->Opcode();
    return opcode == Op_MemBarRelease;
  }

  // 2) card mark detection helper

  // helper predicate which can be used to detect a volatile membar
  // introduced as part of a conditional card mark sequence either by
  // G1 or by CMS when UseCondCardMark is true.
  //
  // membar can be definitively determined to be part of a card mark
  // sequence if and only if all the following hold
  //
  // i) it is a MemBarVolatile
  //
  // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
  // true
  //
  // iii) the node's Mem projection feeds a StoreCM node.

  bool is_card_mark_membar(const MemBarNode *barrier)
  {
    if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
      return false;
    }

    if (barrier->Opcode() != Op_MemBarVolatile) {
      return false;
    }

    ProjNode *mem = barrier->proj_out(TypeFunc::Memory);

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
      Node *y = mem->fast_out(i);
      if (y->Opcode() == Op_StoreCM) {
        return true;
      }
    }

    return false;
  }


  // 3) helper predicates to traverse volatile put or CAS graphs which
  // may contain GC barrier subgraphs

  // Preamble
  // --------
  //
  // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a leading
  // MemBarRelease and a trailing MemBarVolatile as follows
  //
  //   MemBarRelease
  //  {      ||      } -- optional
  //  {MemBarCPUOrder}
  //         ||     \\
  //         ||     StoreX[mo_release]
  //         | \     /
  //         | MergeMem
  //         | /
  //   MemBarVolatile
  //
  // where
  //  || and \\ represent Ctl and Mem feeds via Proj nodes
  //  | \ and / indicate further routing of the Ctl and Mem feeds
  //
  // this is the graph we see for non-object stores. however, for a
  // volatile Object store (StoreN/P) we may see other nodes below the
  // leading membar because of the need for a GC pre- or post-write
  // barrier.
  //
  // with most GC configurations we will see this simple variant which
  // includes a post-write barrier card mark.
  //
  //   MemBarRelease______________________________
  //         ||    \\               Ctl \        \\
  //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
  //         | \     /                       . . .  /
  //         | MergeMem
  //         | /
  //         ||      /
  //   MemBarVolatile
  //
  // i.e. the leading membar feeds Ctl to a CastP2X (which converts
  // the object address to an int used to compute the card offset) and
  // Ctl+Mem to a StoreB node (which does the actual card mark).
  //
  // n.b. a StoreCM node will only appear in this configuration when
  // using CMS. StoreCM differs from a normal card mark write (StoreB)
  // because it implies a requirement to order visibility of the card
  // mark (StoreCM) relative to the object put (StoreP/N) using a
  // StoreStore memory barrier (arguably this ought to be represented
  // explicitly in the ideal graph but that is not how it works). This
  // ordering is required for both non-volatile and volatile
  // puts. Normally that means we need to translate a StoreCM using
  // the sequence
  //
  //   dmb ishst
  //   stlrb
  //
  // However, in the case of a volatile put if we can recognise this
  // configuration and plant an stlr for the object write then we can
  // omit the dmb and just plant an strb since visibility of the stlr
  // is ordered before visibility of subsequent stores. StoreCM nodes
  // also arise when using G1 or using CMS with conditional card
  // marking. In these cases (as we shall see) we don't need to insert
  // the dmb when translating StoreCM because there is already an
  // intervening StoreLoad barrier between it and the StoreP/N.
  //
  // It is also possible to perform the card mark conditionally on it
  // currently being unmarked in which case the volatile put graph
  // will look slightly different
  //
  //   MemBarRelease____________________________________________
  //         ||    \\               Ctl \     Ctl \     \\  Mem \
  //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
  //         | \     /                              \            |
  //         | MergeMem                            . . .      StoreB
  //         | /                                                /
  //         ||     /
  //   MemBarVolatile
  //
  // It is worth noting at this stage that both the above
  // configurations can be uniquely identified by checking that the
  // memory flow includes the following subgraph:
  //
  //   MemBarRelease
  //  {MemBarCPUOrder}
  //          |  \      . . .
  //          |  StoreX[mo_release]  . . .
  //          |   /
  //         MergeMem
  //          |
  //   MemBarVolatile
  //
  // This is referred to as a *normal* subgraph. It can easily be
  // detected starting from any candidate MemBarRelease,
  // StoreX[mo_release] or MemBarVolatile.
  //
  // A simple variation on this normal case occurs for an unsafe CAS
  // operation. The basic graph for a non-object CAS is
  //
  //   MemBarRelease
  //         ||
  //   MemBarCPUOrder
  //         ||     \\   . . .
  //         ||     CompareAndSwapX
  //         ||       |
  //         ||     SCMemProj
  //         | \     /
  //         | MergeMem
  //         | /
  //   MemBarCPUOrder
  //         ||
  //   MemBarAcquire
  //
  // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
  // tail of the graph is a pair comprising a MemBarCPUOrder +
  // MemBarAcquire.
1529   //
1530   // So, in the case of a CAS the normal graph has the variant form
1531   //
1532   //   MemBarRelease
1533   //   MemBarCPUOrder
1534   //          |   \      . . .
1535   //          |  CompareAndSwapX  . . .
1536   //          |    |
1537   //          |   SCMemProj
1538   //          |   /  . . .
1539   //         MergeMem
1540   //          |
1541   //   MemBarCPUOrder
1542   //   MemBarAcquire
1543   //
1544   // This graph can also easily be detected starting from any
1545   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1546   //
  // the code below uses two helper predicates, leading_to_normal and
  // normal_to_leading, to identify these normal graphs, one validating
1549   // the layout starting from the top membar and searching down and
1550   // the other validating the layout starting from the lower membar
1551   // and searching up.
1552   //
1553   // There are two special case GC configurations when a normal graph
1554   // may not be generated: when using G1 (which always employs a
1555   // conditional card mark); and when using CMS with conditional card
1556   // marking configured. These GCs are both concurrent rather than
  // stop-the-world GCs. So they introduce extra Ctl+Mem flow into the
1558   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1560   // object put and the corresponding conditional card mark. CMS
1561   // employs a post-write GC barrier while G1 employs both a pre- and
1562   // post-write GC barrier. Of course the extra nodes may be absent --
1563   // they are only inserted for object puts. This significantly
1564   // complicates the task of identifying whether a MemBarRelease,
1565   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1566   // when using these GC configurations (see below). It adds similar
1567   // complexity to the task of identifying whether a MemBarRelease,
1568   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1569   //
1570   // In both cases the post-write subtree includes an auxiliary
1571   // MemBarVolatile (StoreLoad barrier) separating the object put and
1572   // the read of the corresponding card. This poses two additional
1573   // problems.
1574   //
1575   // Firstly, a card mark MemBarVolatile needs to be distinguished
1576   // from a normal trailing MemBarVolatile. Resolving this first
1577   // problem is straightforward: a card mark MemBarVolatile always
1578   // projects a Mem feed to a StoreCM node and that is a unique marker
1579   //
1580   //      MemBarVolatile (card mark)
1581   //       C |    \     . . .
1582   //         |   StoreCM   . . .
1583   //       . . .
1584   //
  // The second problem is how the code generator should translate the
  // card mark barrier. It always needs to be translated to a "dmb
1587   // ish" instruction whether or not it occurs as part of a volatile
1588   // put. A StoreLoad barrier is needed after the object put to ensure
1589   // i) visibility to GC threads of the object put and ii) visibility
1590   // to the mutator thread of any card clearing write by a GC
1591   // thread. Clearly a normal store (str) will not guarantee this
1592   // ordering but neither will a releasing store (stlr). The latter
1593   // guarantees that the object put is visible but does not guarantee
1594   // that writes by other threads have also been observed.
1595   // 
1596   // So, returning to the task of translating the object put and the
  // leading/trailing membar nodes: what do the non-normal node graphs
  // look like for these two special cases? and how can we determine the
1599   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1600   // in both normal and non-normal cases?
1601   //
1602   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1604   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1605   // intervening StoreLoad barrier (MemBarVolatile).
1606   //
1607   // So, with CMS we may see a node graph for a volatile object store
1608   // which looks like this
1609   //
1610   //   MemBarRelease
1611   //   MemBarCPUOrder_(leading)__________________
1612   //     C |    M \       \\                   C \
1613   //       |       \    StoreN/P[mo_release]  CastP2X
1614   //       |    Bot \    /
1615   //       |       MergeMem
1616   //       |         /
1617   //      MemBarVolatile (card mark)
1618   //     C |  ||    M |
1619   //       | LoadB    |
1620   //       |   |      |
1621   //       | Cmp      |\
1622   //       | /        | \
1623   //       If         |  \
1624   //       | \        |   \
1625   // IfFalse  IfTrue  |    \
1626   //       \     / \  |     \
1627   //        \   / StoreCM    |
1628   //         \ /      |      |
1629   //        Region   . . .   |
1630   //          | \           /
1631   //          |  . . .  \  / Bot
1632   //          |       MergeMem
1633   //          |          |
1634   //        MemBarVolatile (trailing)
1635   //
1636   // The first MergeMem merges the AliasIdxBot Mem slice from the
1637   // leading membar and the oopptr Mem slice from the Store into the
1638   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1639   // Mem slice from the card mark membar and the AliasIdxRaw slice
1640   // from the StoreCM into the trailing membar (n.b. the latter
1641   // proceeds via a Phi associated with the If region).
1642   //
1643   // The graph for a CAS varies slightly, the obvious difference being
1644   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1645   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1646   // MemBarAcquire pair. The other important difference is that the
1647   // CompareAndSwap node's SCMemProj is not merged into the card mark
1648   // membar - it still feeds the trailing MergeMem. This also means
1649   // that the card mark membar receives its Mem feed directly from the
1650   // leading membar rather than via a MergeMem.
1651   //
1652   //   MemBarRelease
1653   //   MemBarCPUOrder__(leading)_________________________
1654   //       ||                       \\                 C \
1655   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1656   //     C |  ||    M |              |
1657   //       | LoadB    |       ______/|
1658   //       |   |      |      /       |
1659   //       | Cmp      |     /      SCMemProj
1660   //       | /        |    /         |
1661   //       If         |   /         /
1662   //       | \        |  /         /
1663   // IfFalse  IfTrue  | /         /
1664   //       \     / \  |/ prec    /
1665   //        \   / StoreCM       /
1666   //         \ /      |        /
1667   //        Region   . . .    /
1668   //          | \            /
1669   //          |  . . .  \   / Bot
1670   //          |       MergeMem
1671   //          |          |
1672   //        MemBarCPUOrder
1673   //        MemBarAcquire (trailing)
1674   //
1675   // This has a slightly different memory subgraph to the one seen
1676   // previously but the core of it is the same as for the CAS normal
  // subgraph
1678   //
1679   //   MemBarRelease
1680   //   MemBarCPUOrder____
1681   //      ||             \      . . .
1682   //   MemBarVolatile  CompareAndSwapX  . . .
1683   //      |  \            |
1684   //        . . .   SCMemProj
1685   //          |     /  . . .
1686   //         MergeMem
1687   //          |
1688   //   MemBarCPUOrder
1689   //   MemBarAcquire
1690   //
1691   //
1692   // G1 is quite a lot more complicated. The nodes inserted on behalf
1693   // of G1 may comprise: a pre-write graph which adds the old value to
1694   // the SATB queue; the releasing store itself; and, finally, a
1695   // post-write graph which performs a card mark.
1696   //
1697   // The pre-write graph may be omitted, but only when the put is
1698   // writing to a newly allocated (young gen) object and then only if
1699   // there is a direct memory chain to the Initialize node for the
1700   // object allocation. This will not happen for a volatile put since
1701   // any memory chain passes through the leading membar.
1702   //
1703   // The pre-write graph includes a series of 3 If tests. The outermost
1704   // If tests whether SATB is enabled (no else case). The next If tests
1705   // whether the old value is non-NULL (no else case). The third tests
1706   // whether the SATB queue index is > 0, if so updating the queue. The
1707   // else case for this third If calls out to the runtime to allocate a
1708   // new queue buffer.
1709   //
1710   // So with G1 the pre-write and releasing store subgraph looks like
1711   // this (the nested Ifs are omitted).
1712   //
1713   //  MemBarRelease (leading)____________
1714   //     C |  ||  M \   M \    M \  M \ . . .
1715   //       | LoadB   \  LoadL  LoadN   \
1716   //       | /        \                 \
1717   //       If         |\                 \
1718   //       | \        | \                 \
1719   //  IfFalse  IfTrue |  \                 \
1720   //       |     |    |   \                 |
1721   //       |     If   |   /\                |
1722   //       |     |          \               |
1723   //       |                 \              |
1724   //       |    . . .         \             |
1725   //       | /       | /       |            |
1726   //      Region  Phi[M]       |            |
1727   //       | \       |         |            |
1728   //       |  \_____ | ___     |            |
1729   //     C | C \     |   C \ M |            |
1730   //       | CastP2X | StoreN/P[mo_release] |
1731   //       |         |         |            |
1732   //     C |       M |       M |          M |
1733   //        \        |         |           /
1734   //                  . . . 
  //          (post-write subtree elided)
1736   //                    . . .
1737   //             C \         M /
1738   //         MemBarVolatile (trailing)
1739   //
1740   // n.b. the LoadB in this subgraph is not the card read -- it's a
1741   // read of the SATB queue active flag.
1742   //
1743   // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
1745   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1746   //
1747   // The G1 post-write subtree is also optional, this time when the
1748   // new value being written is either null or can be identified as a
1749   // newly allocated (young gen) object with no intervening control
1750   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged directly into the
1753   // trailing membar as per the normal subgraph. So, the only special
1754   // case which arises is when the post-write subgraph is generated.
1755   //
1756   // The kernel of the post-write G1 subgraph is the card mark itself
1757   // which includes a card mark memory barrier (MemBarVolatile), a
1758   // card test (LoadB), and a conditional update (If feeding a
1759   // StoreCM). These nodes are surrounded by a series of nested Ifs
1760   // which try to avoid doing the card mark. The top level If skips if
1761   // the object reference does not cross regions (i.e. it tests if
1762   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1763   // need not be recorded. The next If, which skips on a NULL value,
1764   // may be absent (it is not generated if the type of value is >=
1765   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1766   // checking if card_val != young).  n.b. although this test requires
1767   // a pre-read of the card it can safely be done before the StoreLoad
1768   // barrier. However that does not bypass the need to reread the card
1769   // after the barrier.
1770   //
1771   //                (pre-write subtree elided)
1772   //        . . .                  . . .    . . .  . . .
1773   //        C |                    M |     M |    M |
1774   //       Region                  Phi[M] StoreN    |
1775   //          |                     / \      |      |
1776   //         / \_______            /   \     |      |
1777   //      C / C \      . . .            \    |      |
1778   //       If   CastP2X . . .            |   |      |
1779   //       / \                           |   |      |
1780   //      /   \                          |   |      |
1781   // IfFalse IfTrue                      |   |      |
1782   //   |       |                         |   |     /|
1783   //   |       If                        |   |    / |
1784   //   |      / \                        |   |   /  |
1785   //   |     /   \                        \  |  /   |
1786   //   | IfFalse IfTrue                   MergeMem  |
1787   //   |  . . .    / \                       /      |
1788   //   |          /   \                     /       |
1789   //   |     IfFalse IfTrue                /        |
1790   //   |      . . .    |                  /         |
1791   //   |               If                /          |
1792   //   |               / \              /           |
1793   //   |              /   \            /            |
1794   //   |         IfFalse IfTrue       /             |
1795   //   |           . . .   |         /              |
1796   //   |                    \       /               |
1797   //   |                     \     /                |
1798   //   |             MemBarVolatile__(card mark)    |
1799   //   |                ||   C |  M \  M \          |
1800   //   |               LoadB   If    |    |         |
1801   //   |                      / \    |    |         |
1802   //   |                     . . .   |    |         |
1803   //   |                          \  |    |        /
1804   //   |                        StoreCM   |       /
1805   //   |                          . . .   |      /
1806   //   |                        _________/      /
1807   //   |                       /  _____________/
1808   //   |   . . .       . . .  |  /            /
1809   //   |    |                 | /   _________/
1810   //   |    |               Phi[M] /        /
1811   //   |    |                 |   /        /
1812   //   |    |                 |  /        /
1813   //   |  Region  . . .     Phi[M]  _____/
1814   //   |    /                 |    /
1815   //   |                      |   /   
1816   //   | . . .   . . .        |  /
1817   //   | /                    | /
1818   // Region           |  |  Phi[M]
1819   //   |              |  |  / Bot
1820   //    \            MergeMem 
1821   //     \            /
1822   //     MemBarVolatile
1823   //
1824   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1825   // from the leading membar and the oopptr Mem slice from the Store
1826   // into the card mark membar i.e. the memory flow to the card mark
1827   // membar still looks like a normal graph.
1828   //
1829   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1830   // Mem slices (from the StoreCM and other card mark queue stores).
1831   // However in this case the AliasIdxBot Mem slice does not come
1832   // direct from the card mark membar. It is merged through a series
1833   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1834   // from the leading membar with the Mem feed from the card mark
1835   // membar. Each Phi corresponds to one of the Ifs which may skip
1836   // around the card mark membar. So when the If implementing the NULL
1837   // value check has been elided the total number of Phis is 2
1838   // otherwise it is 3.
1839   //
1840   // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
1844   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1845   // Mem feed from the CompareAndSwapP/N includes a precedence
1846   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1847   // trailing membar. So, as before the configuration includes the
1848   // normal CAS graph as a subgraph of the memory flow.
1849   //
1850   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1852   // its child membar, either a volatile put graph (including a
1853   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1854   // When that child is not a card mark membar then it marks the end
1855   // of the volatile put or CAS subgraph. If the child is a card mark
1856   // membar then the normal subgraph will form part of a volatile put
1857   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1858   // to a trailing barrier via a MergeMem. That feed is either direct
1859   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1860   // memory flow (for G1).
1861   // 
1862   // The predicates controlling generation of instructions for store
1863   // and barrier nodes employ a few simple helper functions (described
1864   // below) which identify the presence or absence of all these
1865   // subgraph configurations and provide a means of traversing from
1866   // one node in the subgraph to another.
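  //
  // as an illustration of how the predicates below are eventually
  // consumed, an instruct rule gates its match on one of them (a
  // hedged sketch: the rule and operand names here are hypothetical;
  // the real rules appear later in this file)
  //
  //   instruct loadI_acq(iRegINoSp dst, memory mem)
  //   %{
  //     match(Set dst (LoadI mem));
  //     predicate(needs_acquiring_load(n));
  //     ... // encoded with ldarw rather than ldrw + dmb
  //   %}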
1867 
1868   // is_CAS(int opcode)
1869   //
1870   // return true if opcode is one of the possible CompareAndSwapX
1871   // values otherwise false.
1872 
1873   bool is_CAS(int opcode)
1874   {
1875     return (opcode == Op_CompareAndSwapI ||
1876             opcode == Op_CompareAndSwapL ||
1877             opcode == Op_CompareAndSwapN ||
1878             opcode == Op_CompareAndSwapP);
1879   }
1880 
1881   // leading_to_normal
1882   //
  // graph traversal helper which detects the normal case Mem feed from
  // a release membar (or, optionally, its cpuorder child) to a
  // dependent volatile membar i.e. it ensures that one or other of
  // the following Mem flow subgraphs is present.
1887   //
1888   //   MemBarRelease
1889   //   MemBarCPUOrder {leading}
1890   //          |  \      . . .
1891   //          |  StoreN/P[mo_release]  . . .
1892   //          |   /
1893   //         MergeMem
1894   //          |
1895   //   MemBarVolatile {trailing or card mark}
1896   //
1897   //   MemBarRelease
1898   //   MemBarCPUOrder {leading}
1899   //      |       \      . . .
1900   //      |     CompareAndSwapX  . . .
1901   //               |
1902   //     . . .    SCMemProj
1903   //           \   |
1904   //      |    MergeMem
1905   //      |       /
1906   //    MemBarCPUOrder
1907   //    MemBarAcquire {trailing}
1908   //
1909   // if the correct configuration is present returns the trailing
1910   // membar otherwise NULL.
1911   //
1912   // the input membar is expected to be either a cpuorder membar or a
  // release membar. in the latter case it should not have a cpuorder
  // membar child.
1915   //
1916   // the returned value may be a card mark or trailing membar
1917   //
1918 
1919   MemBarNode *leading_to_normal(MemBarNode *leading)
1920   {
1921     assert((leading->Opcode() == Op_MemBarRelease ||
1922             leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a release or cpuorder membar!");
1924 
1925     // check the mem flow
1926     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1927 
1928     if (!mem) {
1929       return NULL;
1930     }
1931 
1932     Node *x = NULL;
1933     StoreNode * st = NULL;
1934     LoadStoreNode *cas = NULL;
1935     MergeMemNode *mm = NULL;
1936 
1937     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1938       x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        // two merge mems is one too many
        if (mm != NULL) {
          return NULL;
        }
        mm = x->as_MergeMem();
1945       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
1946         // two releasing stores/CAS nodes is one too many
1947         if (st != NULL || cas != NULL) {
1948           return NULL;
1949         }
1950         st = x->as_Store();
1951       } else if (is_CAS(x->Opcode())) {
1952         if (st != NULL || cas != NULL) {
1953           return NULL;
1954         }
1955         cas = x->as_LoadStore();
1956       }
1957     }
1958 
1959     // must have a store or a cas
1960     if (!st && !cas) {
1961       return NULL;
1962     }
1963 
1964     // must have a merge if we also have st
1965     if (st && !mm) {
1966       return NULL;
1967     }
1968 
1969     Node *y = NULL;
1970     if (cas) {
1971       // look for an SCMemProj
1972       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
1973         x = cas->fast_out(i);
1974         if (x->is_Proj()) {
1975           y = x;
1976           break;
1977         }
1978       }
1979       if (y == NULL) {
1980         return NULL;
1981       }
1982       // the proj must feed a MergeMem
1983       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
1984         x = y->fast_out(i);
1985         if (x->is_MergeMem()) {
1986           mm = x->as_MergeMem();
1987           break;
1988         }
1989       }
1990       if (mm == NULL)
1991         return NULL;
1992     } else {
1993       // ensure the store feeds the existing mergemem;
1994       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
1995         if (st->fast_out(i) == mm) {
1996           y = st;
1997           break;
1998         }
1999       }
2000       if (y == NULL) {
2001         return NULL;
2002       }
2003     }
2004 
2005     MemBarNode *mbar = NULL;
    // ensure the merge feeds the expected type of membar
2007     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2008       x = mm->fast_out(i);
2009       if (x->is_MemBar()) {
2010         int opcode = x->Opcode();
2011         if (opcode == Op_MemBarVolatile && st) {
2012           mbar = x->as_MemBar();
2013         } else if (cas && opcode == Op_MemBarCPUOrder) {
2014           MemBarNode *y =  x->as_MemBar();
2015           y = child_membar(y);
2016           if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
2017             mbar = y;
2018           }
2019         }
2020         break;
2021       }
2022     }
2023 
2024     return mbar;
2025   }
2026 
2027   // normal_to_leading
2028   //
2029   // graph traversal helper which detects the normal case Mem feed
2030   // from either a card mark or a trailing membar to a preceding
2031   // release membar (optionally its cpuorder child) i.e. it ensures
2032   // that one or other of the following Mem flow subgraphs is present.
2033   //
2034   //   MemBarRelease
2035   //   MemBarCPUOrder {leading}
2036   //          |  \      . . .
2037   //          |  StoreN/P[mo_release]  . . .
2038   //          |   /
2039   //         MergeMem
2040   //          |
2041   //   MemBarVolatile {card mark or trailing}
2042   //
2043   //   MemBarRelease
2044   //   MemBarCPUOrder {leading}
2045   //      |       \      . . .
2046   //      |     CompareAndSwapX  . . .
2047   //               |
2048   //     . . .    SCMemProj
2049   //           \   |
2050   //      |    MergeMem
2051   //      |        /
2052   //    MemBarCPUOrder
2053   //    MemBarAcquire {trailing}
2054   //
2055   // this predicate checks for the same flow as the previous predicate
2056   // but starting from the bottom rather than the top.
2057   //
  // if the configuration is present returns the cpuorder membar for
  // preference or, when absent, the release membar; otherwise NULL.
2060   //
2061   // n.b. the input membar is expected to be a MemBarVolatile but
2062   // need not be a card mark membar.
2063 
2064   MemBarNode *normal_to_leading(const MemBarNode *barrier)
2065   {
    // input must be a volatile or acquire membar
2067     assert((barrier->Opcode() == Op_MemBarVolatile ||
2068             barrier->Opcode() == Op_MemBarAcquire),
2069            "expecting a volatile or an acquire membar");
2070     Node *x;
2071     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2072 
2073     // if we have an acquire membar then it must be fed via a CPUOrder
2074     // membar
2075 
2076     if (is_cas) {
2077       // skip to parent barrier which must be a cpuorder
2078       x = parent_membar(barrier);
      if (x == NULL || x->Opcode() != Op_MemBarCPUOrder)
2080         return NULL;
2081     } else {
2082       // start from the supplied barrier
2083       x = (Node *)barrier;
2084     }
2085 
2086     // the Mem feed to the membar should be a merge
    x = x->in(TypeFunc::Memory);
2088     if (!x->is_MergeMem())
2089       return NULL;
2090 
2091     MergeMemNode *mm = x->as_MergeMem();
2092 
2093     if (is_cas) {
2094       // the merge should be fed from the CAS via an SCMemProj node
2095       x = NULL;
2096       for (uint idx = 1; idx < mm->req(); idx++) {
2097         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2098           x = mm->in(idx);
2099           break;
2100         }
2101       }
2102       if (x == NULL) {
2103         return NULL;
2104       }
2105       // check for a CAS feeding this proj
2106       x = x->in(0);
2107       int opcode = x->Opcode();
2108       if (!is_CAS(opcode)) {
2109         return NULL;
2110       }
2111       // the CAS should get its mem feed from the leading membar
2112       x = x->in(MemNode::Memory);
2113     } else {
2114       // the merge should get its Bottom mem feed from the leading membar
2115       x = mm->in(Compile::AliasIdxBot);      
2116     } 
2117 
    // ensure this is a non-control projection
2119     if (!x->is_Proj() || x->is_CFG()) {
2120       return NULL;
2121     }
2122     // if it is fed by a membar that's the one we want
2123     x = x->in(0);
2124 
2125     if (!x->is_MemBar()) {
2126       return NULL;
2127     }
2128 
2129     MemBarNode *leading = x->as_MemBar();
2130     // reject invalid candidates
2131     if (!leading_membar(leading)) {
2132       return NULL;
2133     }
2134 
2135     // ok, we have a leading membar, now for the sanity clauses
2136 
2137     // the leading membar must feed Mem to a releasing store or CAS
2138     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2139     StoreNode *st = NULL;
2140     LoadStoreNode *cas = NULL;
2141     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2142       x = mem->fast_out(i);
2143       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2144         // two stores or CASes is one too many
2145         if (st != NULL || cas != NULL) {
2146           return NULL;
2147         }
2148         st = x->as_Store();
2149       } else if (is_CAS(x->Opcode())) {
2150         if (st != NULL || cas != NULL) {
2151           return NULL;
2152         }
2153         cas = x->as_LoadStore();
2154       }
2155     }
2156 
    // we must have found at least one of a store or a cas
    if (st == NULL && cas == NULL) {
2159       return NULL;
2160     }
2161 
2162     if (st == NULL) {
2163       // nothing more to check
2164       return leading;
2165     } else {
2166       // we should not have a store if we started from an acquire
2167       if (is_cas) {
2168         return NULL;
2169       }
2170 
2171       // the store should feed the merge we used to get here
2172       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2173         if (st->fast_out(i) == mm) {
2174           return leading;
2175         }
2176       }
2177     }
2178 
2179     return NULL;
2180   }
2181 
2182   // card_mark_to_trailing
2183   //
2184   // graph traversal helper which detects extra, non-normal Mem feed
2185   // from a card mark volatile membar to a trailing membar i.e. it
2186   // ensures that one of the following three GC post-write Mem flow
2187   // subgraphs is present.
2188   //
2189   // 1)
2190   //     . . .
2191   //       |
2192   //   MemBarVolatile (card mark)
2193   //      |          |     
2194   //      |        StoreCM
2195   //      |          |
2196   //      |        . . .
2197   //  Bot |  / 
2198   //   MergeMem 
2199   //      |
2200   //      |
2201   //    MemBarVolatile {trailing}
2202   //
2203   // 2)
2204   //   MemBarRelease/CPUOrder (leading)
2205   //    |
2206   //    | 
2207   //    |\       . . .
2208   //    | \        | 
2209   //    |  \  MemBarVolatile (card mark) 
2210   //    |   \   |     |
2211   //     \   \  |   StoreCM    . . .
2212   //      \   \ |
2213   //       \  Phi
2214   //        \ /
2215   //        Phi  . . .
2216   //     Bot |   /
2217   //       MergeMem
2218   //         |
2219   //    MemBarVolatile {trailing}
2220   //
2221   //
2222   // 3)
2223   //   MemBarRelease/CPUOrder (leading)
2224   //    |
2225   //    |\
2226   //    | \
2227   //    |  \      . . .
2228   //    |   \       |
2229   //    |\   \  MemBarVolatile (card mark)
2230   //    | \   \   |     |
2231   //    |  \   \  |   StoreCM    . . .
2232   //    |   \   \ |
2233   //     \   \  Phi
2234   //      \   \ /  
2235   //       \  Phi
2236   //        \ /
2237   //        Phi  . . .
2238   //     Bot |   /
2239   //       MergeMem
2240   //         |
2241   //         |
2242   //    MemBarVolatile {trailing}
2243   //
2244   // configuration 1 is only valid if UseConcMarkSweepGC &&
2245   // UseCondCardMark
2246   //
2247   // configurations 2 and 3 are only valid if UseG1GC.
2248   //
2249   // if a valid configuration is present returns the trailing membar
2250   // otherwise NULL.
2251   //
2252   // n.b. the supplied membar is expected to be a card mark
2253   // MemBarVolatile i.e. the caller must ensure the input node has the
  // correct opcode and feeds Mem to a StoreCM node
2255 
2256   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2257   {
2258     // input must be a card mark volatile membar
2259     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2260 
2261     Node *feed = barrier->proj_out(TypeFunc::Memory);
2262     Node *x;
2263     MergeMemNode *mm = NULL;
2264 
2265     const int MAX_PHIS = 3;     // max phis we will search through
2266     int phicount = 0;           // current search count
2267 
2268     bool retry_feed = true;
2269     while (retry_feed) {
2270       // see if we have a direct MergeMem feed
2271       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2272         x = feed->fast_out(i);
        // a direct MergeMem feed is what we are looking for
2274         if (x->is_MergeMem()) {
2275           mm = x->as_MergeMem();
2276           break;
2277         }
2278       }
2279       if (mm) {
2280         retry_feed = false;
      } else if (UseG1GC && phicount++ < MAX_PHIS) {
2282         // the barrier may feed indirectly via one or two Phi nodes
2283         PhiNode *phi = NULL;
2284         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2285           x = feed->fast_out(i);
2286           // the correct Phi will be merging a Bot memory slice
2287           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2288             phi = x->as_Phi();
2289             break;
2290           }
2291         }
2292         if (!phi) {
2293           return NULL;
2294         }
2295         // look for another merge below this phi
2296         feed = phi;
2297       } else {
2298         // couldn't find a merge
2299         return NULL;
2300       }
2301     }
2302 
2303     // sanity check this feed turns up as the expected slice
2304     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2305 
2306     MemBarNode *trailing = NULL;
    // be sure we have a trailing membar fed by the merge
2308     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2309       x = mm->fast_out(i);
2310       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2311         trailing = x->as_MemBar();
2312         break;
2313       }
2314     }
2315 
2316     return trailing;
2317   }
2318 
2319   // trailing_to_card_mark
2320   //
2321   // graph traversal helper which detects extra, non-normal Mem feed
2322   // from a trailing volatile membar to a preceding card mark volatile
2323   // membar i.e. it identifies whether one of the three possible extra
2324   // GC post-write Mem flow subgraphs is present
2325   //
2326   // this predicate checks for the same flow as the previous predicate
2327   // but starting from the bottom rather than the top.
2328   //
2329   // if the configuration is present returns the card mark membar
2330   // otherwise NULL
2331   //
2332   // n.b. the supplied membar is expected to be a trailing
2333   // MemBarVolatile i.e. the caller must ensure the input node has the
2334   // correct opcode
2335 
2336   MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
2337   {
2338     assert(trailing->Opcode() == Op_MemBarVolatile,
2339            "expecting a volatile membar");
2340     assert(!is_card_mark_membar(trailing),
2341            "not expecting a card mark membar");
2342 
2343     // the Mem feed to the membar should be a merge
2344     Node *x = trailing->in(TypeFunc::Memory);
2345     if (!x->is_MergeMem()) {
2346       return NULL;
2347     }
2348 
2349     MergeMemNode *mm = x->as_MergeMem();
2350 
2351     x = mm->in(Compile::AliasIdxBot);
    // with G1 we may see a Phi or two before we see a Memory
2353     // Proj from the card mark membar
2354 
2355     const int MAX_PHIS = 3;     // max phis we will search through
2356     int phicount = 0;           // current search count
2357 
2358     bool retry_feed = !x->is_Proj();
2359 
2360     while (retry_feed) {
2361       if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
2362         PhiNode *phi = x->as_Phi();
2363         ProjNode *proj = NULL;
2364         PhiNode *nextphi = NULL;
2365         bool found_leading = false;
2366         for (uint i = 1; i < phi->req(); i++) {
2367           x = phi->in(i);
2368           if (x->is_Phi()) {
2369             nextphi = x->as_Phi();
2370           } else if (x->is_Proj()) {
2371             int opcode = x->in(0)->Opcode();
2372             if (opcode == Op_MemBarVolatile) {
2373               proj = x->as_Proj();
2374             } else if (opcode == Op_MemBarRelease ||
2375                        opcode == Op_MemBarCPUOrder) {
2376               // probably a leading membar
2377               found_leading = true;
2378             }
2379           }
2380         }
2381         // if we found a correct looking proj then retry from there
        // otherwise we must see a leading membar and a phi or this is
        // the wrong config
2384         if (proj != NULL) {
2385           x = proj;
2386           retry_feed = false;
2387         } else if (found_leading && nextphi != NULL) {
2388           // retry from this phi to check phi2
2389           x = nextphi;
2390         } else {
2391           // not what we were looking for
2392           return NULL;
2393         }
2394       } else {
2395         return NULL;
2396       }
2397     }
2398     // the proj has to come from the card mark membar
2399     x = x->in(0);
2400     if (!x->is_MemBar()) {
2401       return NULL;
2402     }
2403 
2404     MemBarNode *card_mark_membar = x->as_MemBar();
2405 
2406     if (!is_card_mark_membar(card_mark_membar)) {
2407       return NULL;
2408     }
2409 
2410     return card_mark_membar;
2411   }
2412 
2413   // trailing_to_leading
2414   //
2415   // graph traversal helper which checks the Mem flow up the graph
2416   // from a (non-card mark) trailing membar attempting to locate and
2417   // return an associated leading membar. it first looks for a
2418   // subgraph in the normal configuration (relying on helper
2419   // normal_to_leading). failing that it then looks for one of the
2420   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2422   // trailing_to_card_mark), and then checks that the card mark membar
2423   // is fed by a leading membar (once again relying on auxiliary
2424   // predicate normal_to_leading).
2425   //
  // if the configuration is valid returns the cpuorder membar for
  // preference or, when absent, the release membar; otherwise NULL.
2428   //
2429   // n.b. the input membar is expected to be either a volatile or
2430   // acquire membar but in the former case must *not* be a card mark
2431   // membar.
2432 
2433   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2434   {
2435     assert((trailing->Opcode() == Op_MemBarAcquire ||
2436             trailing->Opcode() == Op_MemBarVolatile),
2437            "expecting an acquire or volatile membar");
2438     assert((trailing->Opcode() != Op_MemBarVolatile ||
2439             !is_card_mark_membar(trailing)),
2440            "not expecting a card mark membar");
2441 
2442     MemBarNode *leading = normal_to_leading(trailing);
2443 
2444     if (leading) {
2445       return leading;
2446     }
2447 
2448     // nothing more to do if this is an acquire
2449     if (trailing->Opcode() == Op_MemBarAcquire) {
2450       return NULL;
2451     }
2452 
2453     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2454 
2455     if (!card_mark_membar) {
2456       return NULL;
2457     }
2458 
2459     return normal_to_leading(card_mark_membar);
2460   }
2461 
2462   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2463 
2464 bool unnecessary_acquire(const Node *barrier)
2465 {
2466   assert(barrier->is_MemBar(), "expecting a membar");
2467 
2468   if (UseBarriersForVolatile) {
2469     // we need to plant a dmb
2470     return false;
2471   }
2472 
2473   // a volatile read derived from bytecode (or also from an inlined
2474   // SHA field read via LibraryCallKit::load_field_from_object)
2475   // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
2477   // cases we will find the load node at the PARMS offset of the
2478   // acquire membar.  n.b. there may be an intervening DecodeN node.
2479   //
2480   // a volatile load derived from an inlined unsafe field access
2481   // manifests as a cpuorder membar with Ctl and Mem projections
2482   // feeding both an acquire membar and a LoadX[mo_acquire]. The
2483   // acquire then feeds another cpuorder membar via Ctl and Mem
2484   // projections. The load has no output dependency on these trailing
2485   // membars because subsequent nodes inserted into the graph take
2486   // their control feed from the final membar cpuorder meaning they
2487   // are all ordered after the load.
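  //
  // for instance (an illustrative Java-level source; the bytecode
  // translation described above is assumed):
  //
  //   class Holder { volatile int x; }
  //   int v = h.x; // LoadI[mo_acquire] feeding a MemBarAcquire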
2488 
2489   Node *x = barrier->lookup(TypeFunc::Parms);
2490   if (x) {
2491     // we are starting from an acquire and it has a fake dependency
2492     //
2493     // need to check for
2494     //
2495     //   LoadX[mo_acquire]
2496     //   {  |1   }
2497     //   {DecodeN}
2498     //      |Parms
2499     //   MemBarAcquire*
2500     //
2501     // where * tags node we were passed
2502     // and |k means input k
2503     if (x->is_DecodeNarrowPtr()) {
2504       x = x->in(1);
2505     }
2506 
2507     return (x->is_Load() && x->as_Load()->is_acquire());
2508   }
2509   
2510   // now check for an unsafe volatile get
2511 
2512   // need to check for
2513   //
2514   //   MemBarCPUOrder
2515   //        ||       \\
2516   //   MemBarAcquire* LoadX[mo_acquire]
2517   //        ||
2518   //   MemBarCPUOrder
2519   //
2520   // where * tags node we were passed
2521   // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes
2522 
2523   // check for a parent MemBarCPUOrder
2524   ProjNode *ctl;
2525   ProjNode *mem;
2526   MemBarNode *parent = parent_membar(barrier);
2527   if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
2528     return false;
2529   ctl = parent->proj_out(TypeFunc::Control);
2530   mem = parent->proj_out(TypeFunc::Memory);
2531   if (!ctl || !mem) {
2532     return false;
2533   }
2534   // ensure the proj nodes both feed a LoadX[mo_acquire]
2535   LoadNode *ld = NULL;
2536   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
2537     x = ctl->fast_out(i);
2538     // if we see a load we keep hold of it and stop searching
2539     if (x->is_Load()) {
2540       ld = x->as_Load();
2541       break;
2542     }
2543   }
2544   // it must be an acquiring load
2545   if (ld && ld->is_acquire()) {
2546 
2547     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2548       x = mem->fast_out(i);
2549       // if we see the same load we drop it and stop searching
2550       if (x == ld) {
2551         ld = NULL;
2552         break;
2553       }
2554     }
2555     // we must have dropped the load
2556     if (ld == NULL) {
2557       // check for a child cpuorder membar
2558       MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
2560         return true;
2561     }
2562   }
2563 
  // final option for unnecessary membar is that it is a trailing node
2565   // belonging to a CAS
2566 
2567   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2568 
2569   return leading != NULL;
2570 }
2571 
2572 bool needs_acquiring_load(const Node *n)
2573 {
2574   assert(n->is_Load(), "expecting a load");
2575   if (UseBarriersForVolatile) {
2576     // we use a normal load and a dmb
2577     return false;
2578   }
2579 
2580   LoadNode *ld = n->as_Load();
2581 
2582   if (!ld->is_acquire()) {
2583     return false;
2584   }
2585 
2586   // check if this load is feeding an acquire membar
2587   //
2588   //   LoadX[mo_acquire]
2589   //   {  |1   }
2590   //   {DecodeN}
2591   //      |Parms
2592   //   MemBarAcquire*
2593   //
2594   // where * tags node we were passed
2595   // and |k means input k
2596 
2597   Node *start = ld;
2598   Node *mbacq = NULL;
2599 
2600   // if we hit a DecodeNarrowPtr we reset the start node and restart
2601   // the search through the outputs
2602  restart:
2603 
2604   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2605     Node *x = start->fast_out(i);
2606     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2607       mbacq = x;
2608     } else if (!mbacq &&
2609                (x->is_DecodeNarrowPtr() ||
2610                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2611       start = x;
2612       goto restart;
2613     }
2614   }
2615 
2616   if (mbacq) {
2617     return true;
2618   }
2619 
2620   // now check for an unsafe volatile get
2621 
  // check if the Ctl and Mem feeds come from a MemBarCPUOrder
2623   //
2624   //     MemBarCPUOrder
2625   //        ||       \\
2626   //   MemBarAcquire* LoadX[mo_acquire]
2627   //        ||
2628   //   MemBarCPUOrder
2629 
2630   MemBarNode *membar;
2631 
2632   membar = parent_membar(ld);
2633 
  if (!membar || membar->Opcode() != Op_MemBarCPUOrder) {
2635     return false;
2636   }
2637 
2638   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2639 
2640   membar = child_membar(membar);
2641 
  if (!membar || membar->Opcode() != Op_MemBarAcquire) {
2643     return false;
2644   }
2645 
2646   membar = child_membar(membar);
2647   
  if (!membar || membar->Opcode() != Op_MemBarCPUOrder) {
2649     return false;
2650   }
2651 
2652   return true;
2653 }
2654 
2655 bool unnecessary_release(const Node *n)
2656 {
2657   assert((n->is_MemBar() &&
2658           n->Opcode() == Op_MemBarRelease),
2659          "expecting a release membar");
2660 
2661   if (UseBarriersForVolatile) {
2662     // we need to plant a dmb
2663     return false;
2664   }
2665 
2666   // if there is a dependent CPUOrder barrier then use that as the
2667   // leading
2668 
2669   MemBarNode *barrier = n->as_MemBar();
2670   // check for an intervening cpuorder membar
2671   MemBarNode *b = child_membar(barrier);
2672   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2673     // ok, so start the check from the dependent cpuorder barrier
2674     barrier = b;
2675   }
2676 
2677   // must start with a normal feed
2678   MemBarNode *child_barrier = leading_to_normal(barrier);
2679 
2680   if (!child_barrier) {
2681     return false;
2682   }
2683 
2684   if (!is_card_mark_membar(child_barrier)) {
2685     // this is the trailing membar and we are done
2686     return true;
2687   }
2688 
2689   // must be sure this card mark feeds a trailing membar
2690   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2691   return (trailing != NULL);
2692 }
2693 
2694 bool unnecessary_volatile(const Node *n)
2695 {
2696   // assert n->is_MemBar();
2697   if (UseBarriersForVolatile) {
2698     // we need to plant a dmb
2699     return false;
2700   }
2701 
2702   MemBarNode *mbvol = n->as_MemBar();
2703 
2704   // first we check if this is part of a card mark. if so then we have
2705   // to generate a StoreLoad barrier
2706   
2707   if (is_card_mark_membar(mbvol)) {
2708       return false;
2709   }
2710 
2711   // ok, if it's not a card mark then we still need to check if it is
  // a trailing membar of a volatile put graph.
2713 
2714   return (trailing_to_leading(mbvol) != NULL);
2715 }
2716 
2717 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2718 
2719 bool needs_releasing_store(const Node *n)
2720 {
2721   // assert n->is_Store();
2722   if (UseBarriersForVolatile) {
2723     // we use a normal store and dmb combination
2724     return false;
2725   }
2726 
2727   StoreNode *st = n->as_Store();
2728 
2729   // the store must be marked as releasing
2730   if (!st->is_release()) {
2731     return false;
2732   }
2733 
2734   // the store must be fed by a membar
2735 
2736   Node *x = st->lookup(StoreNode::Memory);
2737 
  if (!x || !x->is_Proj()) {
2739     return false;
2740   }
2741 
2742   ProjNode *proj = x->as_Proj();
2743 
2744   x = proj->lookup(0);
2745 
2746   if (!x || !x->is_MemBar()) {
2747     return false;
2748   }
2749 
2750   MemBarNode *barrier = x->as_MemBar();
2751 
  // if the barrier is a release membar or a cpuorder membar fed by a
2753   // release membar then we need to check whether that forms part of a
2754   // volatile put graph.
2755 
2756   // reject invalid candidates
2757   if (!leading_membar(barrier)) {
2758     return false;
2759   }
2760 
2761   // does this lead a normal subgraph?
2762   MemBarNode *mbvol = leading_to_normal(barrier);
2763 
2764   if (!mbvol) {
2765     return false;
2766   }
2767 
2768   // all done unless this is a card mark
2769   if (!is_card_mark_membar(mbvol)) {
2770     return true;
2771   }
2772   
2773   // we found a card mark -- just make sure we have a trailing barrier
2774 
2775   return (card_mark_to_trailing(mbvol) != NULL);
2776 }
2777 
2778 // predicate controlling translation of CAS
2779 //
2780 // returns true if CAS needs to use an acquiring load otherwise false
2781 
2782 bool needs_acquiring_load_exclusive(const Node *n)
2783 {
2784   assert(is_CAS(n->Opcode()), "expecting a compare and swap");
2785   if (UseBarriersForVolatile) {
2786     return false;
2787   }
2788 
2789   // CAS nodes only ought to turn up in inlined unsafe CAS operations
2790 #ifdef ASSERT
2791   LoadStoreNode *st = n->as_LoadStore();
2792 
2793   // the store must be fed by a membar
2794 
2795   Node *x = st->lookup(StoreNode::Memory);
2796 
2797   assert (x && x->is_Proj(), "CAS not fed by memory proj!");
2798 
2799   ProjNode *proj = x->as_Proj();
2800 
2801   x = proj->lookup(0);
2802 
2803   assert (x && x->is_MemBar(), "CAS not fed by membar!");
2804 
2805   MemBarNode *barrier = x->as_MemBar();
2806 
  // the barrier must be a cpuorder membar fed by a release membar
2808 
2809   assert(barrier->Opcode() == Op_MemBarCPUOrder,
2810          "CAS not fed by cpuorder membar!");
2811       
2812   MemBarNode *b = parent_membar(barrier);
2813   assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
2814           "CAS not fed by cpuorder+release membar pair!");
2815 
2816   // does this lead a normal subgraph?
2817   MemBarNode *mbar = leading_to_normal(barrier);
2818 
2819   assert(mbar != NULL, "CAS not embedded in normal graph!");
2820 
2821   assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
2822 #endif // ASSERT
2823   // so we can just return true here
2824   return true;
2825 }
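
// n.b. as an illustrative note: when this predicate returns true the
// CAS encoding can implement its acquire semantics with an acquiring
// load exclusive (ldaxr rather than ldxr), avoiding a trailing dmb.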
2826 
2827 // predicate controlling translation of StoreCM
2828 //
// returns true if the StoreStore barrier preceding the card write
// can be elided otherwise false
2831 
2832 bool unnecessary_storestore(const Node *storecm)
2833 {
2834   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2835 
2836   // we only ever need to generate a dmb ishst between an object put
2837   // and the associated card mark when we are using CMS without
2838   // conditional card marking
2839 
2840   if (!UseConcMarkSweepGC || UseCondCardMark) {
2841     return true;
2842   }
2843 
2844   // if we are implementing volatile puts using barriers then the
  // object put is implemented as an str so we must insert the dmb ishst
2846 
2847   if (UseBarriersForVolatile) {
2848     return false;
2849   }
2850 
2851   // we can omit the dmb ishst if this StoreCM is part of a volatile
  // put because in that case the put will be implemented by stlr
2853   //
2854   // we need to check for a normal subgraph feeding this StoreCM.
2855   // that means the StoreCM must be fed Memory from a leading membar,
2856   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2857   // leading membar must be part of a normal subgraph
2858 
2859   Node *x = storecm->in(StoreNode::Memory);
2860 
2861   if (!x->is_Proj()) {
2862     return false;
2863   }
2864 
2865   x = x->in(0);
2866 
2867   if (!x->is_MemBar()) {
2868     return false;
2869   }
2870 
2871   MemBarNode *leading = x->as_MemBar();
2872 
2873   // reject invalid candidates
2874   if (!leading_membar(leading)) {
2875     return false;
2876   }
2877 
2878   // we can omit the StoreStore if it is the head of a normal subgraph
2879   return (leading_to_normal(leading) != NULL);
2880 }
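
// as an illustration (a hedged sketch: rule and operand names here
// are hypothetical), the StoreCM instruct rules later in this file
// are split on this predicate
//
//   instruct storeCM_unordered(iRegI src, memory mem)
//   %{
//     match(Set mem (StoreCM mem src));
//     predicate(unnecessary_storestore(n));
//     ... // emits a plain strb
//   %}
//
// with a twin rule guarded by !unnecessary_storestore(n) which emits
// a dmb ishst before the strb.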
2881 
2882 
2883 #define __ _masm.
2884 
2885 // advance declarations for helper functions to convert register
2886 // indices to register objects
2887 
2888 // the ad file has to provide implementations of certain methods
2889 // expected by the generic code
2890 //
2891 // REQUIRED FUNCTIONALITY
2892 
2893 //=============================================================================
2894 
2895 // !!!!! Special hack to get all types of calls to specify the byte offset
2896 //       from the start of the call to the point where the return address
2897 //       will point.
2898 
2899 int MachCallStaticJavaNode::ret_addr_offset()
2900 {
2901   // call should be a simple bl
2902   int off = 4;
2903   return off;
2904 }
2905 
2906 int MachCallDynamicJavaNode::ret_addr_offset()
2907 {
2908   return 16; // movz, movk, movk, bl
2909 }
2910 
2911 int MachCallRuntimeNode::ret_addr_offset() {
2912   // for generated stubs the call will be
2913   //   far_call(addr)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr))
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blrt rscratch1
  // (the lea of a far runtime address expands to a movz, movk, movk
  // sequence, which accounts for the six instruction total)
2920   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2921   if (cb) {
2922     return MacroAssembler::far_branch_size();
2923   } else {
2924     return 6 * NativeInstruction::instruction_size;
2925   }
2926 }
2927 
2928 // Indicate if the safepoint node needs the polling page as an input
2929 
2930 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2932 // instruction itself. so we cannot plant a mov of the safepoint poll
2933 // address followed by a load. setting this to true means the mov is
2934 // scheduled as a prior instruction. that's better for scheduling
2935 // anyway.
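//
// i.e. the poll is emitted as (an illustrative sketch; compare the
// epilog format output below)
//
//   mov rscratch1, #<polling page address>  ; scheduled as a prior insn
//   ldr zr, [rscratch1]                     ; oop map data attached here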
2936 
2937 bool SafePointNode::needs_polling_address_input()
2938 {
2939   return true;
2940 }
2941 
2942 //=============================================================================
2943 
2944 #ifndef PRODUCT
2945 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2946   st->print("BREAKPOINT");
2947 }
2948 #endif
2949 
2950 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2951   MacroAssembler _masm(&cbuf);
2952   __ brk(0);
2953 }
2954 
2955 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
2956   return MachNode::size(ra_);
2957 }
2958 
2959 //=============================================================================
2960 
2961 #ifndef PRODUCT
2962   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2963     st->print("nop \t# %d bytes pad for loops and calls", _count);
2964   }
2965 #endif
2966 
2967   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2968     MacroAssembler _masm(&cbuf);
2969     for (int i = 0; i < _count; i++) {
2970       __ nop();
2971     }
2972   }
2973 
2974   uint MachNopNode::size(PhaseRegAlloc*) const {
2975     return _count * NativeInstruction::instruction_size;
2976   }
2977 
2978 //=============================================================================
2979 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2980 
2981 int Compile::ConstantTable::calculate_table_base_offset() const {
2982   return 0;  // absolute addressing, no offset
2983 }
2984 
2985 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
2986 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
2987   ShouldNotReachHere();
2988 }
2989 
2990 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
2991   // Empty encoding
2992 }
2993 
2994 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
2995   return 0;
2996 }
2997 
2998 #ifndef PRODUCT
2999 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
3000   st->print("-- \t// MachConstantBaseNode (empty encoding)");
3001 }
3002 #endif
3003 
3004 #ifndef PRODUCT
3005 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3006   Compile* C = ra_->C;
3007 
3008   int framesize = C->frame_slots() << LogBytesPerInt;
3009 
3010   if (C->need_stack_bang(framesize))
3011     st->print("# stack bang size=%d\n\t", framesize);
3012 
3013   if (framesize < ((1 << 9) + 2 * wordSize)) {
3014     st->print("sub  sp, sp, #%d\n\t", framesize);
3015     st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
3016     if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
3017   } else {
3018     st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
3019     if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
3020     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
3021     st->print("sub  sp, sp, rscratch1");
3022   }
3023 }
3024 #endif
3025 
3026 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3027   Compile* C = ra_->C;
3028   MacroAssembler _masm(&cbuf);
3029 
3030   // n.b. frame size includes space for return pc and rfp
3031   const long framesize = C->frame_size_in_bytes();
3032   assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
3033 
3034   // insert a nop at the start of the prolog so we can patch in a
3035   // branch if we need to invalidate the method later
3036   __ nop();
3037 
3038   int bangsize = C->bang_size_in_bytes();
3039   if (C->need_stack_bang(bangsize) && UseStackBanging)
3040     __ generate_stack_overflow_check(bangsize);
3041 
3042   __ build_frame(framesize);
3043 
3044   if (NotifySimulator) {
3045     __ notify(Assembler::method_entry);
3046   }
3047 
3048   if (VerifyStackAtCalls) {
3049     Unimplemented();
3050   }
3051 
3052   C->set_frame_complete(cbuf.insts_size());
3053 
3054   if (C->has_mach_constant_base_node()) {
3055     // NOTE: We set the table base offset here because users might be
3056     // emitted before MachConstantBaseNode.
3057     Compile::ConstantTable& constant_table = C->constant_table();
3058     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
3059   }
3060 }
3061 
3062 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
3063 {
3064   return MachNode::size(ra_); // too many variables; just compute it
3065                               // the hard way
3066 }
3067 
3068 int MachPrologNode::reloc() const
3069 {
3070   return 0;
3071 }
3072 
3073 //=============================================================================
3074 
3075 #ifndef PRODUCT
3076 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3077   Compile* C = ra_->C;
3078   int framesize = C->frame_slots() << LogBytesPerInt;
3079 
3080   st->print("# pop frame %d\n\t",framesize);
3081 
3082   if (framesize == 0) {
3083     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
3084   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
3085     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
3086     st->print("add  sp, sp, #%d\n\t", framesize);
3087   } else {
3088     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
3089     st->print("add  sp, sp, rscratch1\n\t");
3090     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
3091   }
3092 
3093   if (do_polling() && C->is_method_compilation()) {
3094     st->print("# touch polling page\n\t");
3095     st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
3096     st->print("ldr zr, [rscratch1]");
3097   }
3098 }
3099 #endif
3100 
3101 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3102   Compile* C = ra_->C;
3103   MacroAssembler _masm(&cbuf);
3104   int framesize = C->frame_slots() << LogBytesPerInt;
3105 
3106   __ remove_frame(framesize);
3107 
3108   if (NotifySimulator) {
3109     __ notify(Assembler::method_reentry);
3110   }
3111 
3112   if (do_polling() && C->is_method_compilation()) {
3113     __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
3114   }
3115 }
3116 
3117 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
3118   // Variable size. Determine dynamically.
3119   return MachNode::size(ra_);
3120 }
3121 
3122 int MachEpilogNode::reloc() const {
3123   // Return number of relocatable values contained in this instruction.
3124   return 1; // 1 for polling page.
3125 }
3126 
3127 const Pipeline * MachEpilogNode::pipeline() const {
3128   return MachNode::pipeline_class();
3129 }
3130 
3131 // This method seems to be obsolete. It is declared in machnode.hpp
3132 // and defined in all *.ad files, but it is never called. Should we
3133 // get rid of it?
3134 int MachEpilogNode::safepoint_offset() const {
3135   assert(do_polling(), "no return for this epilog node");
3136   return 4;
3137 }
3138 
3139 //=============================================================================
3140 
// Figure out which register class a given register belongs in: rc_int,
// rc_float or rc_stack.
3143 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3144 
3145 static enum RC rc_class(OptoReg::Name reg) {
3146 
3147   if (reg == OptoReg::Bad) {
3148     return rc_bad;
3149   }
3150 
3151   // we have 30 int registers * 2 halves
3152   // (rscratch1 and rscratch2 are omitted)
3153 
3154   if (reg < 60) {
3155     return rc_int;
3156   }
3157 
  // we have 32 float registers * 4 halves
3159   if (reg < 60 + 128) {
3160     return rc_float;
3161   }
3162 
3163   // Between float regs & stack is the flags regs.
3164   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3165 
3166   return rc_stack;
3167 }
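
// For illustration, given the register definitions earlier in this
// file: OptoReg names 0..59 are the 30 general registers exposed to
// the allocator (two 32-bit halves each; rscratch1 and rscratch2 are
// not among them), 60..187 are the 32 float/vector registers at four
// slots each, the flags register follows, and everything above that
// is a stack slot.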
3168 
3169 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3170   Compile* C = ra_->C;
3171 
3172   // Get registers to move.
3173   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3174   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3175   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3176   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3177 
3178   enum RC src_hi_rc = rc_class(src_hi);
3179   enum RC src_lo_rc = rc_class(src_lo);
3180   enum RC dst_hi_rc = rc_class(dst_hi);
3181   enum RC dst_lo_rc = rc_class(dst_lo);
3182 
3183   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3184 
3185   if (src_hi != OptoReg::Bad) {
3186     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3187            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3188            "expected aligned-adjacent pairs");
3189   }
3190 
3191   if (src_lo == dst_lo && src_hi == dst_hi) {
3192     return 0;            // Self copy, no move.
3193   }
3194 
3195   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3196               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3197   int src_offset = ra_->reg2offset(src_lo);
3198   int dst_offset = ra_->reg2offset(dst_lo);
3199 
3200   if (bottom_type()->isa_vect() != NULL) {
3201     uint ireg = ideal_reg();
3202     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3203     if (cbuf) {
3204       MacroAssembler _masm(cbuf);
3205       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3206       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3207         // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3209         if (ireg == Op_VecD) {
3210           __ unspill(rscratch1, true, src_offset);
3211           __ spill(rscratch1, true, dst_offset);
3212         } else {
3213           __ spill_copy128(src_offset, dst_offset);
3214         }
3215       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3216         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3217                ireg == Op_VecD ? __ T8B : __ T16B,
3218                as_FloatRegister(Matcher::_regEncode[src_lo]));
3219       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3220         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3221                        ireg == Op_VecD ? __ D : __ Q,
3222                        ra_->reg2offset(dst_lo));
3223       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3224         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3225                        ireg == Op_VecD ? __ D : __ Q,
3226                        ra_->reg2offset(src_lo));
3227       } else {
3228         ShouldNotReachHere();
3229       }
3230     }
3231   } else if (cbuf) {
3232     MacroAssembler _masm(cbuf);
3233     switch (src_lo_rc) {
3234     case rc_int:
3235       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3236         if (is64) {
3237             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3238                    as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
3244       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3245         if (is64) {
3246             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3247                      as_Register(Matcher::_regEncode[src_lo]));
3248         } else {
3249             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3250                      as_Register(Matcher::_regEncode[src_lo]));
3251         }
3252       } else {                    // gpr --> stack spill
3253         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3254         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3255       }
3256       break;
3257     case rc_float:
3258       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3259         if (is64) {
3260             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3261                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3262         } else {
3263             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3264                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3265         }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
3274       } else {                    // fpr --> stack spill
3275         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3276         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3277                  is64 ? __ D : __ S, dst_offset);
3278       }
3279       break;
3280     case rc_stack:
3281       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3282         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3283       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3284         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3285                    is64 ? __ D : __ S, src_offset);
3286       } else {                    // stack --> stack copy
3287         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3288         __ unspill(rscratch1, is64, src_offset);
3289         __ spill(rscratch1, is64, dst_offset);
3290       }
3291       break;
3292     default:
3293       assert(false, "bad rc_class for spill");
3294       ShouldNotReachHere();
3295     }
3296   }
3297 
3298   if (st) {
3299     st->print("spill ");
3300     if (src_lo_rc == rc_stack) {
3301       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3302     } else {
3303       st->print("%s -> ", Matcher::regName[src_lo]);
3304     }
3305     if (dst_lo_rc == rc_stack) {
3306       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3307     } else {
3308       st->print("%s", Matcher::regName[dst_lo]);
3309     }
3310     if (bottom_type()->isa_vect() != NULL) {
3311       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3312     } else {
3313       st->print("\t# spill size = %d", is64 ? 64:32);
3314     }
3315   }
3316 
3317   return 0;
3318 
3319 }
3320 
3321 #ifndef PRODUCT
3322 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3323   if (!ra_)
3324     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3325   else
3326     implementation(NULL, ra_, false, st);
3327 }
3328 #endif
3329 
3330 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3331   implementation(&cbuf, ra_, false, NULL);
3332 }
3333 
3334 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
3335   return MachNode::size(ra_);
3336 }
3337 
3338 //=============================================================================
3339 
3340 #ifndef PRODUCT
3341 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3342   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3343   int reg = ra_->get_reg_first(this);
  st->print("add %s, sp, #%d\t# box lock",
3345             Matcher::regName[reg], offset);
3346 }
3347 #endif
3348 
3349 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3350   MacroAssembler _masm(&cbuf);
3351 
3352   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3353   int reg    = ra_->get_encode(this);
3354 
3355   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3356     __ add(as_Register(reg), sp, offset);
3357   } else {
3358     ShouldNotReachHere();
3359   }
3360 }
3361 
3362 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
3363   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
3364   return 4;
3365 }
3366 
3367 //=============================================================================
3368 
3369 #ifndef PRODUCT
3370 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3371 {
3372   st->print_cr("# MachUEPNode");
3373   if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3375     if (Universe::narrow_klass_shift() != 0) {
3376       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3377     }
3378   } else {
    st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
3380   }
3381   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne SharedRuntime::_ic_miss_stub");
3383 }
3384 #endif
3385 
3386 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
3387 {
3388   // This is the unverified entry point.
3389   MacroAssembler _masm(&cbuf);
3390 
3391   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
3392   Label skip;
3393   // TODO
3394   // can we avoid this skip and still use a reloc?
3395   __ br(Assembler::EQ, skip);
3396   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
3397   __ bind(skip);
3398 }
3399 
3400 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
3401 {
3402   return MachNode::size(ra_);
3403 }
3404 
3405 // REQUIRED EMIT CODE
3406 
3407 //=============================================================================
3408 
3409 // Emit exception handler code.
3410 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3411 {
3412   // mov rscratch1 #exception_blob_entry_point
3413   // br rscratch1
3414   // Note that the code buffer's insts_mark is always relative to insts.
3415   // That's why we must use the macroassembler to generate a handler.
3416   MacroAssembler _masm(&cbuf);
3417   address base = __ start_a_stub(size_exception_handler());
3418   if (base == NULL) {
3419     ciEnv::current()->record_failure("CodeCache is full");
3420     return 0;  // CodeBuffer::expand failed
3421   }
3422   int offset = __ offset();
3423   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3424   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3425   __ end_a_stub();
3426   return offset;
3427 }
3428 
3429 // Emit deopt handler code.
3430 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
3431 {
3432   // Note that the code buffer's insts_mark is always relative to insts.
3433   // That's why we must use the macroassembler to generate a handler.
3434   MacroAssembler _masm(&cbuf);
3435   address base = __ start_a_stub(size_deopt_handler());
3436   if (base == NULL) {
3437     ciEnv::current()->record_failure("CodeCache is full");
3438     return 0;  // CodeBuffer::expand failed
3439   }
3440   int offset = __ offset();
3441 
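  // lr will point at this handler; the deopt blob's unpack entry uses
  // that return address to identify the deoptimization site.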
3442   __ adr(lr, __ pc());
3443   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
3444 
3445   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
3446   __ end_a_stub();
3447   return offset;
3448 }
3449 
3450 // REQUIRED MATCHER CODE
3451 
3452 //=============================================================================
3453 
3454 const bool Matcher::match_rule_supported(int opcode) {
3455 
3456   // TODO
3457   // identify extra cases that we might want to provide match rules for
3458   // e.g. Op_StrEquals and other intrinsics
3459   if (!has_match_rule(opcode)) {
3460     return false;
3461   }
3462 
3463   return true;  // Per default match rules are supported.
3464 }
3465 
3466 int Matcher::regnum_to_fpu_offset(int regnum)
3467 {
3468   Unimplemented();
3469   return 0;
3470 }
3471 
3472 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
3473 {
3474   Unimplemented();
3475   return false;
3476 }
3477 
3478 const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
3480   // Probably always true, even if a temp register is required.
3481   return true;
3482 }
3483 
3484 // true just means we have fast l2f conversion
3485 const bool Matcher::convL2FSupported(void) {
3486   return true;
3487 }
3488 
3489 // Vector width in bytes.
3490 const int Matcher::vector_width_in_bytes(BasicType bt) {
3491   int size = MIN2(16,(int)MaxVectorSize);
3492   // Minimum 2 values in vector
3493   if (size < 2*type2aelembytes(bt)) size = 0;
3494   // But never < 4
3495   if (size < 4) size = 0;
3496   return size;
3497 }
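
// For example (a sketch of the arithmetic above): with MaxVectorSize >= 16
// this returns 16 for every element type; with MaxVectorSize == 8 it
// returns 8 for T_INT (two elements) but 0 for T_LONG, since a single
// 8-byte element does not make a vector.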
3498 
3499 // Limits on vector size (number of elements) loaded into vector.
3500 const int Matcher::max_vector_size(const BasicType bt) {
3501   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3502 }
const int Matcher::min_vector_size(const BasicType bt) {
  // For the moment limit the vector size to 8 bytes
  int size = 8 / type2aelembytes(bt);
  if (size < 2) size = 2;
  return size;
}
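
// e.g. min_vector_size(T_INT) == 8 / 4 == 2 elements, while for T_LONG
// the 8 / 8 == 1 result is rounded up to the two-element minimum.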
3509 
3510 // Vector ideal reg.
3511 const int Matcher::vector_ideal_reg(int len) {
3512   switch(len) {
3513     case  8: return Op_VecD;
3514     case 16: return Op_VecX;
3515   }
3516   ShouldNotReachHere();
3517   return 0;
3518 }
3519 
3520 const int Matcher::vector_shift_count_ideal_reg(int size) {
3521   return Op_VecX;
3522 }
3523 
3524 // AES support not yet implemented
3525 const bool Matcher::pass_original_key_for_aes() {
3526   return false;
3527 }
3528 
// AArch64 supports misaligned (unaligned) vector loads and stores.
3530 const bool Matcher::misaligned_vectors_ok() {
3531   return !AlignVector; // can be changed by flag
3532 }
3533 
3534 // false => size gets scaled to BytesPerLong, ok.
3535 const bool Matcher::init_array_count_is_in_bytes = false;
3536 
3537 // Threshold size for cleararray.
3538 const int Matcher::init_array_short_size = 18 * BytesPerLong;
3539 
3540 // Use conditional move (CMOVL)
3541 const int Matcher::long_cmove_cost() {
3542   // long cmoves are no more expensive than int cmoves
3543   return 0;
3544 }
3545 
3546 const int Matcher::float_cmove_cost() {
3547   // float cmoves are no more expensive than int cmoves
3548   return 0;
3549 }
3550 
3551 // Does the CPU require late expand (see block.cpp for description of late expand)?
3552 const bool Matcher::require_postalloc_expand = false;
3553 
3554 // Should the Matcher clone shifts on addressing modes, expecting them
3555 // to be subsumed into complex addressing expressions or compute them
3556 // into registers?  True for Intel but false for most RISCs
3557 const bool Matcher::clone_shift_expressions = false;
3558 
3559 // Do we need to mask the count passed to shift instructions or does
3560 // the cpu only look at the lower 5/6 bits anyway?
3561 const bool Matcher::need_masked_shift_count = false;
3562 
3563 // This affects two different things:
3564 //  - how Decode nodes are matched
3565 //  - how ImplicitNullCheck opportunities are recognized
3566 // If true, the matcher will try to remove all Decodes and match them
3567 // (as operands) into nodes. NullChecks are not prepared to deal with
3568 // Decodes by final_graph_reshaping().
3569 // If false, final_graph_reshaping() forces the decode behind the Cmp
3570 // for a NullCheck. The matcher matches the Decode node into a register.
3571 // Implicit_null_check optimization moves the Decode along with the
3572 // memory operation back up before the NullCheck.
3573 bool Matcher::narrow_oop_use_complex_address() {
3574   return Universe::narrow_oop_shift() == 0;
3575 }
3576 
3577 bool Matcher::narrow_klass_use_complex_address() {
3578 // TODO
3579 // decide whether we need to set this to true
3580   return false;
3581 }
3582 
3583 // Is it better to copy float constants, or load them directly from
3584 // memory?  Intel can load a float constant from a direct address,
3585 // requiring no extra registers.  Most RISCs will have to materialize
3586 // an address into a register first, so they would do better to copy
3587 // the constant from stack.
3588 const bool Matcher::rematerialize_float_constants = false;
3589 
3590 // If CPU can load and store mis-aligned doubles directly then no
3591 // fixup is needed.  Else we split the double into 2 integer pieces
3592 // and move it piece-by-piece.  Only happens when passing doubles into
3593 // C code as the Java calling convention forces doubles to be aligned.
3594 const bool Matcher::misaligned_doubles_ok = true;
3595 
// Not used on AArch64; implicit null checks need no fixup here.
3597 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
3598   Unimplemented();
3599 }
3600 
3601 // Advertise here if the CPU requires explicit rounding operations to
3602 // implement the UseStrictFP mode.
3603 const bool Matcher::strict_fp_requires_explicit_rounding = false;
3604 
3605 // Are floats converted to double when stored to stack during
3606 // deoptimization?
3607 bool Matcher::float_in_double() { return true; }
3608 
3609 // Do ints take an entire long register or just half?
3610 // The relevant question is how the int is callee-saved:
3611 // the whole long is written but de-opt'ing will have to extract
3612 // the relevant 32 bits.
3613 const bool Matcher::int_in_long = true;
3614 
3615 // Return whether or not this register is ever used as an argument.
3616 // This function is used on startup to build the trampoline stubs in
3617 // generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers will not be
3619 // available to the callee.
3620 bool Matcher::can_be_java_arg(int reg)
3621 {
3622   return
3623     reg ==  R0_num || reg == R0_H_num ||
3624     reg ==  R1_num || reg == R1_H_num ||
3625     reg ==  R2_num || reg == R2_H_num ||
3626     reg ==  R3_num || reg == R3_H_num ||
3627     reg ==  R4_num || reg == R4_H_num ||
3628     reg ==  R5_num || reg == R5_H_num ||
3629     reg ==  R6_num || reg == R6_H_num ||
3630     reg ==  R7_num || reg == R7_H_num ||
3631     reg ==  V0_num || reg == V0_H_num ||
3632     reg ==  V1_num || reg == V1_H_num ||
3633     reg ==  V2_num || reg == V2_H_num ||
3634     reg ==  V3_num || reg == V3_H_num ||
3635     reg ==  V4_num || reg == V4_H_num ||
3636     reg ==  V5_num || reg == V5_H_num ||
3637     reg ==  V6_num || reg == V6_H_num ||
3638     reg ==  V7_num || reg == V7_H_num;
3639 }
3640 
3641 bool Matcher::is_spillable_arg(int reg)
3642 {
3643   return can_be_java_arg(reg);
3644 }
3645 
3646 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
3647   return false;
3648 }
3649 
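// Register for DIVI projection of divmodI.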
3650 RegMask Matcher::divI_proj_mask() {
3651   ShouldNotReachHere();
3652   return RegMask();
3653 }
3654 
3655 // Register for MODI projection of divmodI.
3656 RegMask Matcher::modI_proj_mask() {
3657   ShouldNotReachHere();
3658   return RegMask();
3659 }
3660 
3661 // Register for DIVL projection of divmodL.
3662 RegMask Matcher::divL_proj_mask() {
3663   ShouldNotReachHere();
3664   return RegMask();
3665 }
3666 
3667 // Register for MODL projection of divmodL.
3668 RegMask Matcher::modL_proj_mask() {
3669   ShouldNotReachHere();
3670   return RegMask();
3671 }
3672 
3673 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
3674   return FP_REG_mask();
3675 }
3676 
// Helper for encoding java_to_runtime calls on the simulator.
//
// This is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. The TypeFunc
// can be queried to identify the counts of integral and floating
// arguments and the return type.
3683 
3684 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3685 {
3686   int gps = 0;
3687   int fps = 0;
3688   const TypeTuple *domain = tf->domain();
3689   int max = domain->cnt();
3690   for (int i = TypeFunc::Parms; i < max; i++) {
3691     const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      break;  // FP args are passed in FP registers, not GP registers
    default:
      gps++;
    }
3699   }
3700   gpcnt = gps;
3701   fpcnt = fps;
3702   BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  }
3717 }
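
// For example (assuming the break added above): a TypeFunc describing a
// (long, double) -> float call comes back with gpcnt == 1, fpcnt == 1
// and rtype == MacroAssembler::ret_type_float.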
3718 
3719 #define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
3720   MacroAssembler _masm(&cbuf);                                          \
3721   {                                                                     \
3722     guarantee(INDEX == -1, "mode not permitted for volatile");          \
3723     guarantee(DISP == 0, "mode not permitted for volatile");            \
3724     guarantee(SCALE == 0, "mode not permitted for volatile");           \
3725     __ INSN(REG, as_Register(BASE));                                    \
3726   }
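
// For illustration, MOV_VOLATILE(dst, base, -1, 0, 0, rscratch1, ldarw)
// expands to a single "__ ldarw(dst, as_Register(base))"; the guarantees
// insist on a plain base-register address because that is the only
// addressing mode the acquire/release instructions encode.  Note that
// the SCRATCH argument is unused by the macro itself.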
3727 
3728 typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
3729 typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
3730 typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
3731                                   MacroAssembler::SIMD_RegVariant T, const Address &adr);
3732 
3733   // Used for all non-volatile memory accesses.  The use of
3734   // $mem->opcode() to discover whether this pattern uses sign-extended
3735   // offsets is something of a kludge.
3736   static void loadStore(MacroAssembler masm, mem_insn insn,
3737                          Register reg, int opcode,
3738                          Register base, int index, int size, int disp)
3739   {
3740     Address::extend scale;
3741 
3742     // Hooboy, this is fugly.  We need a way to communicate to the
3743     // encoder that the index needs to be sign extended, so we have to
3744     // enumerate all the cases.
3745     switch (opcode) {
3746     case INDINDEXSCALEDOFFSETI2L:
3747     case INDINDEXSCALEDI2L:
3748     case INDINDEXSCALEDOFFSETI2LN:
3749     case INDINDEXSCALEDI2LN:
3750     case INDINDEXOFFSETI2L:
3751     case INDINDEXOFFSETI2LN:
3752       scale = Address::sxtw(size);
3753       break;
3754     default:
3755       scale = Address::lsl(size);
3756     }
3757 
3758     if (index == -1) {
3759       (masm.*insn)(reg, Address(base, disp));
3760     } else {
3761       if (disp == 0) {
3762         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3763       } else {
3764         masm.lea(rscratch1, Address(base, disp));
3765         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3766       }
3767     }
3768   }
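
  // So, for example, a call with insn == &MacroAssembler::ldr emits one
  // of (roughly):
  //   ldr reg, [base, #disp]                     // no index
  //   ldr reg, [base, index, extend #size]       // index, disp == 0
  //   add rscratch1, base, #disp                 // index and disp:
  //   ldr reg, [rscratch1, index, extend #size]  // go via rscratch1
  // where extend is sxtw for the I2L opcodes listed above and lsl
  // otherwise.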
3769 
3770   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3771                          FloatRegister reg, int opcode,
3772                          Register base, int index, int size, int disp)
3773   {
3774     Address::extend scale;
3775 
3776     switch (opcode) {
3777     case INDINDEXSCALEDOFFSETI2L:
3778     case INDINDEXSCALEDI2L:
3779     case INDINDEXSCALEDOFFSETI2LN:
3780     case INDINDEXSCALEDI2LN:
3781       scale = Address::sxtw(size);
3782       break;
3783     default:
3784       scale = Address::lsl(size);
3785     }
3786 
    if (index == -1) {
3788       (masm.*insn)(reg, Address(base, disp));
3789     } else {
3790       if (disp == 0) {
3791         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3792       } else {
3793         masm.lea(rscratch1, Address(base, disp));
3794         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3795       }
3796     }
3797   }
3798 
3799   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3800                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3801                          int opcode, Register base, int index, int size, int disp)
3802   {
3803     if (index == -1) {
3804       (masm.*insn)(reg, T, Address(base, disp));
3805     } else {
3806       assert(disp == 0, "unsupported address mode");
3807       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3808     }
3809   }
3810 
3811 %}
3812 
3813 
3814 
3815 //----------ENCODING BLOCK-----------------------------------------------------
3816 // This block specifies the encoding classes used by the compiler to
3817 // output byte streams.  Encoding classes are parameterized macros
3818 // used by Machine Instruction Nodes in order to generate the bit
3819 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are currently
// supported: REG_INTER, CONST_INTER, MEMORY_INTER, and COND_INTER.
// REG_INTER causes an operand to generate a function
3823 // which returns its register number when queried.  CONST_INTER causes
3824 // an operand to generate a function which returns the value of the
3825 // constant when queried.  MEMORY_INTER causes an operand to generate
3826 // four functions which return the Base Register, the Index Register,
3827 // the Scale Value, and the Offset Value of the operand when queried.
3828 // COND_INTER causes an operand to generate six functions which return
3829 // the encoding code (ie - encoding bits for the instruction)
3830 // associated with each basic boolean condition for a conditional
3831 // instruction.
3832 //
3833 // Instructions specify two basic values for encoding.  Again, a
3834 // function is available to check if the constant displacement is an
3835 // oop. They use the ins_encode keyword to specify their encoding
3836 // classes (which must be a sequence of enc_class names, and their
3837 // parameters, specified in the encoding block), and they use the
3838 // opcode keyword to specify, in order, their primary, secondary, and
3839 // tertiary opcode.  Only the opcode sections which a particular
3840 // instruction needs for encoding need to be specified.
3841 encode %{
  // Build emit functions for each basic byte or larger field in the
  // instruction encoding and call them from C++ code in the enc_class
  // source block.  Emit functions will live in the main source block
  // for now.  In future, we can
3846   // generalize this by adding a syntax that specifies the sizes of
3847   // fields in an order, so that the adlc can build the emit functions
3848   // automagically
3849 
3850   // catch all for unimplemented encodings
3851   enc_class enc_unimplemented %{
3852     MacroAssembler _masm(&cbuf);
3853     __ unimplemented("C2 catch all");
3854   %}
3855 
3856   // BEGIN Non-volatile memory access
3857 
3858   enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
3859     Register dst_reg = as_Register($dst$$reg);
3860     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
3861                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3862   %}
3863 
3864   enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
3865     Register dst_reg = as_Register($dst$$reg);
3866     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
3867                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3868   %}
3869 
3870   enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
3871     Register dst_reg = as_Register($dst$$reg);
3872     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
3873                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3874   %}
3875 
3876   enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
3877     Register dst_reg = as_Register($dst$$reg);
3878     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
3879                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3880   %}
3881 
3882   enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
3883     Register dst_reg = as_Register($dst$$reg);
3884     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
3885                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3886   %}
3887 
3888   enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
3889     Register dst_reg = as_Register($dst$$reg);
3890     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
3891                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3892   %}
3893 
3894   enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
3895     Register dst_reg = as_Register($dst$$reg);
3896     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
3897                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3898   %}
3899 
3900   enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
3901     Register dst_reg = as_Register($dst$$reg);
3902     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
3903                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3904   %}
3905 
3906   enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
3907     Register dst_reg = as_Register($dst$$reg);
3908     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
3909                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3910   %}
3911 
3912   enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
3913     Register dst_reg = as_Register($dst$$reg);
3914     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
3915                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3916   %}
3917 
3918   enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
3919     Register dst_reg = as_Register($dst$$reg);
3920     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
3921                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3922   %}
3923 
3924   enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
3925     Register dst_reg = as_Register($dst$$reg);
3926     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
3927                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3928   %}
3929 
3930   enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
3931     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3932     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
3933                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3934   %}
3935 
3936   enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
3937     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3938     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
3939                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3940   %}
3941 
3942   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
3943     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3944     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
3945        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3946   %}
3947 
3948   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
3949     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3950     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
3951        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3952   %}
3953 
3954   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
3955     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3956     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
3957        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3958   %}
3959 
3960   enc_class aarch64_enc_strb(iRegI src, memory mem) %{
3961     Register src_reg = as_Register($src$$reg);
3962     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
3963                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3964   %}
3965 
3966   enc_class aarch64_enc_strb0(memory mem) %{
3967     MacroAssembler _masm(&cbuf);
3968     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
3969                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3970   %}
3971 
3972   enc_class aarch64_enc_strb0_ordered(memory mem) %{
3973     MacroAssembler _masm(&cbuf);
3974     __ membar(Assembler::StoreStore);
3975     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
3976                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3977   %}
3978 
3979   enc_class aarch64_enc_strh(iRegI src, memory mem) %{
3980     Register src_reg = as_Register($src$$reg);
3981     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
3982                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3983   %}
3984 
3985   enc_class aarch64_enc_strh0(memory mem) %{
3986     MacroAssembler _masm(&cbuf);
3987     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
3988                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3989   %}
3990 
3991   enc_class aarch64_enc_strw(iRegI src, memory mem) %{
3992     Register src_reg = as_Register($src$$reg);
3993     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
3994                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3995   %}
3996 
3997   enc_class aarch64_enc_strw0(memory mem) %{
3998     MacroAssembler _masm(&cbuf);
3999     loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
4000                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4001   %}
4002 
4003   enc_class aarch64_enc_str(iRegL src, memory mem) %{
4004     Register src_reg = as_Register($src$$reg);
4005     // we sometimes get asked to store the stack pointer into the
4006     // current thread -- we cannot do that directly on AArch64
4007     if (src_reg == r31_sp) {
4008       MacroAssembler _masm(&cbuf);
4009       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
4010       __ mov(rscratch2, sp);
4011       src_reg = rscratch2;
4012     }
4013     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
4014                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4015   %}
4016 
4017   enc_class aarch64_enc_str0(memory mem) %{
4018     MacroAssembler _masm(&cbuf);
4019     loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
4020                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4021   %}
4022 
4023   enc_class aarch64_enc_strs(vRegF src, memory mem) %{
4024     FloatRegister src_reg = as_FloatRegister($src$$reg);
4025     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
4026                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4027   %}
4028 
4029   enc_class aarch64_enc_strd(vRegD src, memory mem) %{
4030     FloatRegister src_reg = as_FloatRegister($src$$reg);
4031     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
4032                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4033   %}
4034 
4035   enc_class aarch64_enc_strvS(vecD src, memory mem) %{
4036     FloatRegister src_reg = as_FloatRegister($src$$reg);
4037     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
4038        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4039   %}
4040 
4041   enc_class aarch64_enc_strvD(vecD src, memory mem) %{
4042     FloatRegister src_reg = as_FloatRegister($src$$reg);
4043     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
4044        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4045   %}
4046 
4047   enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
4048     FloatRegister src_reg = as_FloatRegister($src$$reg);
4049     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
4050        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4051   %}
4052 
4053   // END Non-volatile memory access
4054 
4055   // volatile loads and stores
4056 
4057   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
4058     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4059                  rscratch1, stlrb);
4060   %}
4061 
4062   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
4063     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4064                  rscratch1, stlrh);
4065   %}
4066 
4067   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
4068     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4069                  rscratch1, stlrw);
4070   %}
4071 
4072 
4073   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
4074     Register dst_reg = as_Register($dst$$reg);
4075     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4076              rscratch1, ldarb);
4077     __ sxtbw(dst_reg, dst_reg);
4078   %}
4079 
4080   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
4081     Register dst_reg = as_Register($dst$$reg);
4082     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4083              rscratch1, ldarb);
4084     __ sxtb(dst_reg, dst_reg);
4085   %}
4086 
4087   enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
4088     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4089              rscratch1, ldarb);
4090   %}
4091 
4092   enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
4093     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4094              rscratch1, ldarb);
4095   %}
4096 
4097   enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
4098     Register dst_reg = as_Register($dst$$reg);
4099     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4100              rscratch1, ldarh);
4101     __ sxthw(dst_reg, dst_reg);
4102   %}
4103 
4104   enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
4105     Register dst_reg = as_Register($dst$$reg);
4106     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4107              rscratch1, ldarh);
4108     __ sxth(dst_reg, dst_reg);
4109   %}
4110 
4111   enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
4112     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4113              rscratch1, ldarh);
4114   %}
4115 
4116   enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
4117     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4118              rscratch1, ldarh);
4119   %}
4120 
4121   enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
4122     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4123              rscratch1, ldarw);
4124   %}
4125 
4126   enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
4127     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4128              rscratch1, ldarw);
4129   %}
4130 
4131   enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
4132     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4133              rscratch1, ldar);
4134   %}
4135 
4136   enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
4137     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4138              rscratch1, ldarw);
4139     __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
4140   %}
4141 
4142   enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
4143     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4144              rscratch1, ldar);
4145     __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
4146   %}
4147 
4148   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
4149     Register src_reg = as_Register($src$$reg);
4150     // we sometimes get asked to store the stack pointer into the
4151     // current thread -- we cannot do that directly on AArch64
4152     if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
4154       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
4155       __ mov(rscratch2, sp);
4156       src_reg = rscratch2;
4157     }
4158     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4159                  rscratch1, stlr);
4160   %}
4161 
4162   enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
4163     {
4164       MacroAssembler _masm(&cbuf);
4165       FloatRegister src_reg = as_FloatRegister($src$$reg);
4166       __ fmovs(rscratch2, src_reg);
4167     }
4168     MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4169                  rscratch1, stlrw);
4170   %}
4171 
4172   enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
4173     {
4174       MacroAssembler _masm(&cbuf);
4175       FloatRegister src_reg = as_FloatRegister($src$$reg);
4176       __ fmovd(rscratch2, src_reg);
4177     }
4178     MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4179                  rscratch1, stlr);
4180   %}
4181 
4182   // synchronized read/update encodings
4183 
4184   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
4185     MacroAssembler _masm(&cbuf);
4186     Register dst_reg = as_Register($dst$$reg);
4187     Register base = as_Register($mem$$base);
4188     int index = $mem$$index;
4189     int scale = $mem$$scale;
4190     int disp = $mem$$disp;
4191     if (index == -1) {
      if (disp != 0) {
4193         __ lea(rscratch1, Address(base, disp));
4194         __ ldaxr(dst_reg, rscratch1);
4195       } else {
4196         // TODO
4197         // should we ever get anything other than this case?
4198         __ ldaxr(dst_reg, base);
4199       }
4200     } else {
4201       Register index_reg = as_Register(index);
4202       if (disp == 0) {
4203         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
4204         __ ldaxr(dst_reg, rscratch1);
4205       } else {
4206         __ lea(rscratch1, Address(base, disp));
4207         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
4208         __ ldaxr(dst_reg, rscratch1);
4209       }
4210     }
4211   %}
4212 
4213   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
4214     MacroAssembler _masm(&cbuf);
4215     Register src_reg = as_Register($src$$reg);
4216     Register base = as_Register($mem$$base);
4217     int index = $mem$$index;
4218     int scale = $mem$$scale;
4219     int disp = $mem$$disp;
4220     if (index == -1) {
      if (disp != 0) {
4222         __ lea(rscratch2, Address(base, disp));
4223         __ stlxr(rscratch1, src_reg, rscratch2);
4224       } else {
4225         // TODO
4226         // should we ever get anything other than this case?
4227         __ stlxr(rscratch1, src_reg, base);
4228       }
4229     } else {
4230       Register index_reg = as_Register(index);
4231       if (disp == 0) {
4232         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4233         __ stlxr(rscratch1, src_reg, rscratch2);
4234       } else {
4235         __ lea(rscratch2, Address(base, disp));
4236         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4237         __ stlxr(rscratch1, src_reg, rscratch2);
4238       }
4239     }
4240     __ cmpw(rscratch1, zr);
4241   %}
4242 
4243   enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
4244     MacroAssembler _masm(&cbuf);
4245     Register old_reg = as_Register($oldval$$reg);
4246     Register new_reg = as_Register($newval$$reg);
4247     Register base = as_Register($mem$$base);
4248     Register addr_reg;
4249     int index = $mem$$index;
4250     int scale = $mem$$scale;
4251     int disp = $mem$$disp;
4252     if (index == -1) {
      if (disp != 0) {
4254         __ lea(rscratch2, Address(base, disp));
4255         addr_reg = rscratch2;
4256       } else {
4257         // TODO
4258         // should we ever get anything other than this case?
4259         addr_reg = base;
4260       }
4261     } else {
4262       Register index_reg = as_Register(index);
4263       if (disp == 0) {
4264         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4265         addr_reg = rscratch2;
4266       } else {
4267         __ lea(rscratch2, Address(base, disp));
4268         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4269         addr_reg = rscratch2;
4270       }
4271     }
4272     Label retry_load, done;
4273     __ bind(retry_load);
4274     __ ldxr(rscratch1, addr_reg);
4275     __ cmp(rscratch1, old_reg);
4276     __ br(Assembler::NE, done);
4277     __ stlxr(rscratch1, new_reg, addr_reg);
4278     __ cbnzw(rscratch1, retry_load);
4279     __ bind(done);
4280   %}
4281 
4282   enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
4283     MacroAssembler _masm(&cbuf);
4284     Register old_reg = as_Register($oldval$$reg);
4285     Register new_reg = as_Register($newval$$reg);
4286     Register base = as_Register($mem$$base);
4287     Register addr_reg;
4288     int index = $mem$$index;
4289     int scale = $mem$$scale;
4290     int disp = $mem$$disp;
4291     if (index == -1) {
      if (disp != 0) {
4293         __ lea(rscratch2, Address(base, disp));
4294         addr_reg = rscratch2;
4295       } else {
4296         // TODO
4297         // should we ever get anything other than this case?
4298         addr_reg = base;
4299       }
4300     } else {
4301       Register index_reg = as_Register(index);
4302       if (disp == 0) {
4303         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4304         addr_reg = rscratch2;
4305       } else {
4306         __ lea(rscratch2, Address(base, disp));
4307         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4308         addr_reg = rscratch2;
4309       }
4310     }
4311     Label retry_load, done;
4312     __ bind(retry_load);
4313     __ ldxrw(rscratch1, addr_reg);
4314     __ cmpw(rscratch1, old_reg);
4315     __ br(Assembler::NE, done);
4316     __ stlxrw(rscratch1, new_reg, addr_reg);
4317     __ cbnzw(rscratch1, retry_load);
4318     __ bind(done);
4319   %}
4320 
4321   // variant of cmpxchg employing an acquiring load which is used by
4322   // CompareAndSwap{LNP} when we are eliding barriers
4323 
4324   enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
4325     MacroAssembler _masm(&cbuf);
4326     Register old_reg = as_Register($oldval$$reg);
4327     Register new_reg = as_Register($newval$$reg);
4328     Register base = as_Register($mem$$base);
4329     Register addr_reg;
4330     int index = $mem$$index;
4331     int scale = $mem$$scale;
4332     int disp = $mem$$disp;
4333     if (index == -1) {
      if (disp != 0) {
4335         __ lea(rscratch2, Address(base, disp));
4336         addr_reg = rscratch2;
4337       } else {
4338         // TODO
4339         // should we ever get anything other than this case?
4340         addr_reg = base;
4341       }
4342     } else {
4343       Register index_reg = as_Register(index);
4344       if (disp == 0) {
4345         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4346         addr_reg = rscratch2;
4347       } else {
4348         __ lea(rscratch2, Address(base, disp));
4349         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4350         addr_reg = rscratch2;
4351       }
4352     }
4353     Label retry_load, done;
4354     __ bind(retry_load);
4355     __ ldaxr(rscratch1, addr_reg);
4356     __ cmp(rscratch1, old_reg);
4357     __ br(Assembler::NE, done);
4358     __ stlxr(rscratch1, new_reg, addr_reg);
4359     __ cbnzw(rscratch1, retry_load);
4360     __ bind(done);
4361   %}
4362 
4363   // variant of cmpxchgw employing an acquiring load which is used by
4364   // CompareAndSwapI when we are eliding barriers
4365 
4366   enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
4367     MacroAssembler _masm(&cbuf);
4368     Register old_reg = as_Register($oldval$$reg);
4369     Register new_reg = as_Register($newval$$reg);
4370     Register base = as_Register($mem$$base);
4371     Register addr_reg;
4372     int index = $mem$$index;
4373     int scale = $mem$$scale;
4374     int disp = $mem$$disp;
4375     if (index == -1) {
      if (disp != 0) {
4377         __ lea(rscratch2, Address(base, disp));
4378         addr_reg = rscratch2;
4379       } else {
4380         // TODO
4381         // should we ever get anything other than this case?
4382         addr_reg = base;
4383       }
4384     } else {
4385       Register index_reg = as_Register(index);
4386       if (disp == 0) {
4387         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4388         addr_reg = rscratch2;
4389       } else {
4390         __ lea(rscratch2, Address(base, disp));
4391         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4392         addr_reg = rscratch2;
4393       }
4394     }
4395     Label retry_load, done;
4396     __ bind(retry_load);
4397     __ ldaxrw(rscratch1, addr_reg);
4398     __ cmpw(rscratch1, old_reg);
4399     __ br(Assembler::NE, done);
4400     __ stlxrw(rscratch1, new_reg, addr_reg);
4401     __ cbnzw(rscratch1, retry_load);
4402     __ bind(done);
4403   %}
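
  // n.b. all four cmpxchg encodings above exit with the condition flags
  // set by the cmp/cmpw against oldval: EQ if the exchange succeeded, NE
  // if it failed.  aarch64_enc_cset_eq below turns that into a 0/1 value
  // in the result register.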
4404 
4405   // auxiliary used for CompareAndSwapX to set result register
4406   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4407     MacroAssembler _masm(&cbuf);
4408     Register res_reg = as_Register($res$$reg);
4409     __ cset(res_reg, Assembler::EQ);
4410   %}
4411 
4412   // prefetch encodings
4413 
4414   enc_class aarch64_enc_prefetchw(memory mem) %{
4415     MacroAssembler _masm(&cbuf);
4416     Register base = as_Register($mem$$base);
4417     int index = $mem$$index;
4418     int scale = $mem$$scale;
4419     int disp = $mem$$disp;
4420     if (index == -1) {
4421       __ prfm(Address(base, disp), PSTL1KEEP);
4422       __ nop();
4423     } else {
4424       Register index_reg = as_Register(index);
4425       if (disp == 0) {
4426         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
4427       } else {
4428         __ lea(rscratch1, Address(base, disp));
4429         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
4430       }
4431     }
4432   %}
4433 
4434   enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
4435     MacroAssembler _masm(&cbuf);
4436     Register cnt_reg = as_Register($cnt$$reg);
4437     Register base_reg = as_Register($base$$reg);
4438     // base is word aligned
4439     // cnt is count of words
4440 
4441     Label loop;
4442     Label entry;
4443 
4444 //  Algorithm:
4445 //
4446 //    scratch1 = cnt & 7;
4447 //    cnt -= scratch1;
4448 //    p += scratch1;
4449 //    switch (scratch1) {
4450 //      do {
4451 //        cnt -= 8;
4452 //          p[-8] = 0;
4453 //        case 7:
4454 //          p[-7] = 0;
4455 //        case 6:
4456 //          p[-6] = 0;
4457 //          // ...
4458 //        case 1:
4459 //          p[-1] = 0;
4460 //        case 0:
4461 //          p += 8;
4462 //      } while (cnt);
4463 //    }
4464 
4465     const int unroll = 8; // Number of str(zr) instructions we'll unroll
4466 
4467     __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= tmp1
4469     // base_reg always points to the end of the region we're about to zero
4470     __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
4471     __ adr(rscratch2, entry);
4472     __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
4473     __ br(rscratch2);
4474     __ bind(loop);
4475     __ sub(cnt_reg, cnt_reg, unroll);
4476     for (int i = -unroll; i < 0; i++)
4477       __ str(zr, Address(base_reg, i * wordSize));
4478     __ bind(entry);
4479     __ add(base_reg, base_reg, unroll * wordSize);
4480     __ cbnz(cnt_reg, loop);
4481   %}
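
  // Worked example of the encoding above: for cnt == 11, rscratch1 == 3,
  // so cnt becomes 8, base advances by 3 words and the br lands 3 str
  // instructions before the entry label, zeroing words 0..2 at negative
  // offsets; the one remaining loop iteration then zeroes words 3..10.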
4482 
  // mov encodings
4484 
4485   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4486     MacroAssembler _masm(&cbuf);
4487     u_int32_t con = (u_int32_t)$src$$constant;
4488     Register dst_reg = as_Register($dst$$reg);
4489     if (con == 0) {
4490       __ movw(dst_reg, zr);
4491     } else {
4492       __ movw(dst_reg, con);
4493     }
4494   %}
4495 
4496   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4497     MacroAssembler _masm(&cbuf);
4498     Register dst_reg = as_Register($dst$$reg);
4499     u_int64_t con = (u_int64_t)$src$$constant;
4500     if (con == 0) {
4501       __ mov(dst_reg, zr);
4502     } else {
4503       __ mov(dst_reg, con);
4504     }
4505   %}
4506 
4507   enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
4508     MacroAssembler _masm(&cbuf);
4509     Register dst_reg = as_Register($dst$$reg);
4510     address con = (address)$src$$constant;
4511     if (con == NULL || con == (address)1) {
4512       ShouldNotReachHere();
4513     } else {
4514       relocInfo::relocType rtype = $src->constant_reloc();
4515       if (rtype == relocInfo::oop_type) {
4516         __ movoop(dst_reg, (jobject)con, /*immediate*/true);
4517       } else if (rtype == relocInfo::metadata_type) {
4518         __ mov_metadata(dst_reg, (Metadata*)con);
4519       } else {
4520         assert(rtype == relocInfo::none, "unexpected reloc type");
4521         if (con < (address)(uintptr_t)os::vm_page_size()) {
4522           __ mov(dst_reg, con);
4523         } else {
4524           unsigned long offset;
4525           __ adrp(dst_reg, con, offset);
4526           __ add(dst_reg, dst_reg, offset);
4527         }
4528       }
4529     }
4530   %}
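
  // The relocInfo::none path above is, schematically:
  //
  //   mov  dst, #con                // con below the page size
  //
  // or, for an arbitrary address,
  //
  //   adrp dst, con                 // pc-relative page of con
  //   add  dst, dst, #offset        // offset of con within its page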
4531 
4532   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4533     MacroAssembler _masm(&cbuf);
4534     Register dst_reg = as_Register($dst$$reg);
4535     __ mov(dst_reg, zr);
4536   %}
4537 
4538   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4539     MacroAssembler _masm(&cbuf);
4540     Register dst_reg = as_Register($dst$$reg);
4541     __ mov(dst_reg, (u_int64_t)1);
4542   %}
4543 
4544   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
4545     MacroAssembler _masm(&cbuf);
4546     address page = (address)$src$$constant;
4547     Register dst_reg = as_Register($dst$$reg);
4548     unsigned long off;
4549     __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
4550     assert(off == 0, "assumed offset == 0");
4551   %}
4552 
4553   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
4554     MacroAssembler _masm(&cbuf);
4555     address page = (address)$src$$constant;
4556     Register dst_reg = as_Register($dst$$reg);
4557     unsigned long off;
4558     __ adrp(dst_reg, ExternalAddress(page), off);
4559     assert(off == 0, "assumed offset == 0");
4560   %}
4561 
4562   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
4563     MacroAssembler _masm(&cbuf);
4564     Register dst_reg = as_Register($dst$$reg);
4565     address con = (address)$src$$constant;
4566     if (con == NULL) {
4567       ShouldNotReachHere();
4568     } else {
4569       relocInfo::relocType rtype = $src->constant_reloc();
4570       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
4571       __ set_narrow_oop(dst_reg, (jobject)con);
4572     }
4573   %}
4574 
4575   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4576     MacroAssembler _masm(&cbuf);
4577     Register dst_reg = as_Register($dst$$reg);
4578     __ mov(dst_reg, zr);
4579   %}
4580 
4581   enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
4582     MacroAssembler _masm(&cbuf);
4583     Register dst_reg = as_Register($dst$$reg);
4584     address con = (address)$src$$constant;
4585     if (con == NULL) {
4586       ShouldNotReachHere();
4587     } else {
4588       relocInfo::relocType rtype = $src->constant_reloc();
4589       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
4590       __ set_narrow_klass(dst_reg, (Klass *)con);
4591     }
4592   %}
4593 
4594   // arithmetic encodings
4595 
4596   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4597     MacroAssembler _masm(&cbuf);
4598     Register dst_reg = as_Register($dst$$reg);
4599     Register src_reg = as_Register($src1$$reg);
4600     int32_t con = (int32_t)$src2$$constant;
4601     // add has primary == 0, subtract has primary == 1
4602     if ($primary) { con = -con; }
4603     if (con < 0) {
4604       __ subw(dst_reg, src_reg, -con);
4605     } else {
4606       __ addw(dst_reg, src_reg, con);
4607     }
4608   %}
4609 
4610   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4611     MacroAssembler _masm(&cbuf);
4612     Register dst_reg = as_Register($dst$$reg);
4613     Register src_reg = as_Register($src1$$reg);
4614     int32_t con = (int32_t)$src2$$constant;
4615     // add has primary == 0, subtract has primary == 1
4616     if ($primary) { con = -con; }
4617     if (con < 0) {
4618       __ sub(dst_reg, src_reg, -con);
4619     } else {
4620       __ add(dst_reg, src_reg, con);
4621     }
4622   %}
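
  // For example (a sketch of the negation dance above): an AddL with
  // constant -7 emits "sub dst, src, #7", while a SubL with constant -7
  // (primary == 1) negates the constant and emits "add dst, src, #7".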
4623 
4624   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4625     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4629     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4630   %}
4631 
4632   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4633     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4637     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4638   %}
4639 
4640   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4641     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4645     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4646   %}
4647 
4648   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4649     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4653     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4654   %}
4655 
4656   // compare instruction encodings
4657 
4658   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4659     MacroAssembler _masm(&cbuf);
4660     Register reg1 = as_Register($src1$$reg);
4661     Register reg2 = as_Register($src2$$reg);
4662     __ cmpw(reg1, reg2);
4663   %}
4664 
4665   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4666     MacroAssembler _masm(&cbuf);
4667     Register reg = as_Register($src1$$reg);
4668     int32_t val = $src2$$constant;
4669     if (val >= 0) {
4670       __ subsw(zr, reg, val);
4671     } else {
4672       __ addsw(zr, reg, -val);
4673     }
4674   %}
4675 
4676   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4677     MacroAssembler _masm(&cbuf);
4678     Register reg1 = as_Register($src1$$reg);
4679     u_int32_t val = (u_int32_t)$src2$$constant;
4680     __ movw(rscratch1, val);
4681     __ cmpw(reg1, rscratch1);
4682   %}
4683 
4684   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4685     MacroAssembler _masm(&cbuf);
4686     Register reg1 = as_Register($src1$$reg);
4687     Register reg2 = as_Register($src2$$reg);
4688     __ cmp(reg1, reg2);
4689   %}
4690 
4691   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
4692     MacroAssembler _masm(&cbuf);
4693     Register reg = as_Register($src1$$reg);
4694     int64_t val = $src2$$constant;
4695     if (val >= 0) {
4696       __ subs(zr, reg, val);
4697     } else if (val != -val) {
4698       __ adds(zr, reg, -val);
4699     } else {
      // aargh, Long.MIN_VALUE is a special case: -val overflows back to val
4701       __ orr(rscratch1, zr, (u_int64_t)val);
4702       __ subs(zr, reg, rscratch1);
4703     }
4704   %}
4705 
4706   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4707     MacroAssembler _masm(&cbuf);
4708     Register reg1 = as_Register($src1$$reg);
4709     u_int64_t val = (u_int64_t)$src2$$constant;
4710     __ mov(rscratch1, val);
4711     __ cmp(reg1, rscratch1);
4712   %}
4713 
4714   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4715     MacroAssembler _masm(&cbuf);
4716     Register reg1 = as_Register($src1$$reg);
4717     Register reg2 = as_Register($src2$$reg);
4718     __ cmp(reg1, reg2);
4719   %}
4720 
4721   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4722     MacroAssembler _masm(&cbuf);
4723     Register reg1 = as_Register($src1$$reg);
4724     Register reg2 = as_Register($src2$$reg);
4725     __ cmpw(reg1, reg2);
4726   %}
4727 
4728   enc_class aarch64_enc_testp(iRegP src) %{
4729     MacroAssembler _masm(&cbuf);
4730     Register reg = as_Register($src$$reg);
4731     __ cmp(reg, zr);
4732   %}
4733 
4734   enc_class aarch64_enc_testn(iRegN src) %{
4735     MacroAssembler _masm(&cbuf);
4736     Register reg = as_Register($src$$reg);
4737     __ cmpw(reg, zr);
4738   %}
4739 
4740   enc_class aarch64_enc_b(label lbl) %{
4741     MacroAssembler _masm(&cbuf);
4742     Label *L = $lbl$$label;
4743     __ b(*L);
4744   %}
4745 
4746   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4747     MacroAssembler _masm(&cbuf);
4748     Label *L = $lbl$$label;
4749     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4750   %}
4751 
4752   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4753     MacroAssembler _masm(&cbuf);
4754     Label *L = $lbl$$label;
4755     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4756   %}
4757 
4758   enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
4759   %{
4760      Register sub_reg = as_Register($sub$$reg);
4761      Register super_reg = as_Register($super$$reg);
4762      Register temp_reg = as_Register($temp$$reg);
4763      Register result_reg = as_Register($result$$reg);
4764 
4765      Label miss;
4766      MacroAssembler _masm(&cbuf);
4767      __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
4768                                      NULL, &miss,
4769                                      /*set_cond_codes:*/ true);
4770      if ($primary) {
4771        __ mov(result_reg, zr);
4772      }
4773      __ bind(miss);
4774   %}
4775 
4776   enc_class aarch64_enc_java_static_call(method meth) %{
4777     MacroAssembler _masm(&cbuf);
4778 
4779     address addr = (address)$meth$$method;
4780     address call;
4781     if (!_method) {
4782       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
4783       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
4784     } else if (_optimized_virtual) {
4785       call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
4786     } else {
4787       call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
4788     }
4789     if (call == NULL) {
4790       ciEnv::current()->record_failure("CodeCache is full"); 
4791       return;
4792     }
4793 
4794     if (_method) {
4795       // Emit stub for static call
4796       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
4797       if (stub == NULL) {
4798         ciEnv::current()->record_failure("CodeCache is full"); 
4799         return;
4800       }
4801     }
4802   %}
4803 
4804   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4805     MacroAssembler _masm(&cbuf);
4806     address call = __ ic_call((address)$meth$$method);
4807     if (call == NULL) {
4808       ciEnv::current()->record_failure("CodeCache is full"); 
4809       return;
4810     }
4811   %}
4812 
4813   enc_class aarch64_enc_call_epilog() %{
4814     MacroAssembler _masm(&cbuf);
4815     if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack
4817       __ call_Unimplemented();
4818     }
4819   %}
4820 
4821   enc_class aarch64_enc_java_to_runtime(method meth) %{
4822     MacroAssembler _masm(&cbuf);
4823 
    // Some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. If so we can call them using a br (they
    // will be in a reachable segment); otherwise we have to use a blrt,
    // which loads the absolute address into a register.
4828     address entry = (address)$meth$$method;
4829     CodeBlob *cb = CodeCache::find_blob(entry);
4830     if (cb) {
4831       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
4832       if (call == NULL) {
4833         ciEnv::current()->record_failure("CodeCache is full"); 
4834         return;
4835       }
4836     } else {
4837       int gpcnt;
4838       int fpcnt;
4839       int rtype;
4840       getCallInfo(tf(), gpcnt, fpcnt, rtype);
4841       Label retaddr;
4842       __ adr(rscratch2, retaddr);
4843       __ lea(rscratch1, RuntimeAddress(entry));
4844       // Leave a breadcrumb for JavaThread::pd_last_frame().
4845       __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
4846       __ blrt(rscratch1, gpcnt, fpcnt, rtype);
4847       __ bind(retaddr);
4848       __ add(sp, sp, 2 * wordSize);
4849     }
4850   %}
4851 
4852   enc_class aarch64_enc_rethrow() %{
4853     MacroAssembler _masm(&cbuf);
4854     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
4855   %}
4856 
4857   enc_class aarch64_enc_ret() %{
4858     MacroAssembler _masm(&cbuf);
4859     __ ret(lr);
4860   %}
4861 
4862   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4863     MacroAssembler _masm(&cbuf);
4864     Register target_reg = as_Register($jump_target$$reg);
4865     __ br(target_reg);
4866   %}
4867 
4868   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4869     MacroAssembler _masm(&cbuf);
4870     Register target_reg = as_Register($jump_target$$reg);
4871     // exception oop should be in r0
4872     // ret addr has been popped into lr
4873     // callee expects it in r3
4874     __ mov(r3, lr);
4875     __ br(target_reg);
4876   %}
4877 
4878   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
4879     MacroAssembler _masm(&cbuf);
4880     Register oop = as_Register($object$$reg);
4881     Register box = as_Register($box$$reg);
4882     Register disp_hdr = as_Register($tmp$$reg);
4883     Register tmp = as_Register($tmp2$$reg);
4884     Label cont;
4885     Label object_has_monitor;
4886     Label cas_failed;
4887 
4888     assert_different_registers(oop, box, tmp, disp_hdr);
4889 
4890     // Load markOop from object into displaced_header.
4891     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
4892 
4893     // Always do locking in runtime.
4894     if (EmitSync & 0x01) {
4895       __ cmp(oop, zr);
4896       return;
4897     }
4898 
4899     if (UseBiasedLocking && !UseOptoBiasInlining) {
4900       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
4901     }
4902 
4903     // Handle existing monitor
4904     if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markOopDesc does not define a bit index, just the bit value,
      // so assert in case the bit position changes
4908 #     define __monitor_value_log2 1
4909       assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
4910       __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
4911 #     undef __monitor_value_log2
4912     }
4913 
4914     // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
4915     __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
4916 
4919     // Initialize the box. (Must happen before we update the object mark!)
4920     __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4921 
4922     // Compare object markOop with mark and if equal exchange scratch1
4923     // with object markOop.
4924     // Note that this is simply a CAS: it does not generate any
4925     // barriers.  These are separately generated by
4926     // membar_acquire_lock().
4927     {
4928       Label retry_load;
4929       __ bind(retry_load);
4930       __ ldxr(tmp, oop);
4931       __ cmp(tmp, disp_hdr);
4932       __ br(Assembler::NE, cas_failed);
4933       // use stlxr to ensure update is immediately visible
4934       __ stlxr(tmp, box, oop);
4935       __ cbzw(tmp, cont);
4936       __ b(retry_load);
4937     }
4938 
4939     // Formerly:
4940     // __ cmpxchgptr(/*oldv=*/disp_hdr,
4941     //               /*newv=*/box,
4942     //               /*addr=*/oop,
4943     //               /*tmp=*/tmp,
4944     //               cont,
4945     //               /*fail*/NULL);
4946 
4947     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4948 
    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; we will continue at label cont.
4951 
4952     __ bind(cas_failed);
4953     // We did not see an unlocked object so try the fast recursive case.
4954 
4955     // Check if the owner is self by comparing the value in the
4956     // markOop of object (disp_hdr) with the stack pointer.
4957     __ mov(rscratch1, sp);
4958     __ sub(disp_hdr, disp_hdr, rscratch1);
4959     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If the condition is true we continue at cont with flags == EQ and
    // can store 0 as the displaced header in the box, which indicates
    // that it is a recursive lock.
4962     __ ands(tmp/*==0?*/, disp_hdr, tmp);
4963     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4964 
4965     // Handle existing monitor.
4966     if ((EmitSync & 0x02) == 0) {
4967       __ b(cont);
4968 
4969       __ bind(object_has_monitor);
4970       // The object's monitor m is unlocked iff m->owner == NULL,
4971       // otherwise m->owner may contain a thread or a stack address.
4972       //
4973       // Try to CAS m->owner from NULL to current thread.
4974       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
4975       __ mov(disp_hdr, zr);
4976 
4977       {
4978         Label retry_load, fail;
4979         __ bind(retry_load);
4980         __ ldxr(rscratch1, tmp);
4981         __ cmp(disp_hdr, rscratch1);
4982         __ br(Assembler::NE, fail);
4983         // use stlxr to ensure update is immediately visible
4984         __ stlxr(rscratch1, rthread, tmp);
4985         __ cbnzw(rscratch1, retry_load);
4986         __ bind(fail);
4987       }
4988 
4989       // Label next;
4990       // __ cmpxchgptr(/*oldv=*/disp_hdr,
4991       //               /*newv=*/rthread,
4992       //               /*addr=*/tmp,
4993       //               /*tmp=*/rscratch1,
4994       //               /*succeed*/next,
4995       //               /*fail*/NULL);
4996       // __ bind(next);
4997 
4998       // store a non-null value into the box.
4999       __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));
5000 
5001       // PPC port checks the following invariants
5002       // #ifdef ASSERT
5003       // bne(flag, cont);
5004       // We have acquired the monitor, check some invariants.
5005       // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
5006       // Invariant 1: _recursions should be 0.
5007       // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
5008       // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
5009       //                        "monitor->_recursions should be 0", -1);
5010       // Invariant 2: OwnerIsThread shouldn't be 0.
5011       // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
5012       //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
5013       //                           "monitor->OwnerIsThread shouldn't be 0", -1);
5014       // #endif
5015     }
5016 
5017     __ bind(cont);
5018     // flag == EQ indicates success
5019     // flag == NE indicates failure
5020 
5021   %}
5022 
5023   // TODO
5024   // reimplement this with custom cmpxchgptr code
5025   // which avoids some of the unnecessary branching
5026   enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
5027     MacroAssembler _masm(&cbuf);
5028     Register oop = as_Register($object$$reg);
5029     Register box = as_Register($box$$reg);
5030     Register disp_hdr = as_Register($tmp$$reg);
5031     Register tmp = as_Register($tmp2$$reg);
5032     Label cont;
5033     Label object_has_monitor;
5034     Label cas_failed;
5035 
5036     assert_different_registers(oop, box, tmp, disp_hdr);
5037 
5038     // Always do locking in runtime.
5039     if (EmitSync & 0x01) {
5040       __ cmp(oop, zr); // Oop can't be 0 here => always false.
5041       return;
5042     }
5043 
5044     if (UseBiasedLocking && !UseOptoBiasInlining) {
5045       __ biased_locking_exit(oop, tmp, cont);
5046     }
5047 
5048     // Find the lock address and load the displaced header from the stack.
5049     __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
5050 
5051     // If the displaced header is 0, we have a recursive unlock.
5052     __ cmp(disp_hdr, zr);
5053     __ br(Assembler::EQ, cont);
5054 
5055 
5056     // Handle existing monitor.
5057     if ((EmitSync & 0x02) == 0) {
5058       __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
5059       __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
5060     }
5061 
    // Check if it is still a lightweight lock; this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.
5065 
    {
      Label retry_load;
      __ bind(retry_load);
      __ ldxr(tmp, oop);
      __ cmp(box, tmp);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, disp_hdr, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }
5077 
5078     // __ cmpxchgptr(/*compare_value=*/box,
5079     //               /*exchange_value=*/disp_hdr,
5080     //               /*where=*/oop,
5081     //               /*result=*/tmp,
5082     //               cont,
5083     //               /*cas_failed*/NULL);
5084     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
5085 
5086     __ bind(cas_failed);
5087 
5088     // Handle existing monitor.
5089     if ((EmitSync & 0x02) == 0) {
5090       __ b(cont);
5091 
5092       __ bind(object_has_monitor);
5093       __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
5094       __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
5095       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
5096       __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are no recursions.
5098       __ cmp(rscratch1, zr);
5099       __ br(Assembler::NE, cont);
5100 
5101       __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
5102       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
5103       __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
5104       __ cmp(rscratch1, zr);
5105       __ cbnz(rscratch1, cont);
5106       // need a release store here
5107       __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
5108       __ stlr(rscratch1, tmp); // rscratch1 is zero
5109     }
5110 
5111     __ bind(cont);
5112     // flag == EQ indicates success
5113     // flag == NE indicates failure
5114   %}
5115 
5116 %}
5117 
5118 //----------FRAME--------------------------------------------------------------
5119 // Definition of frame structure and management information.
5120 //
5121 //  S T A C K   L A Y O U T    Allocators stack-slot number
5122 //                             |   (to get allocators register number
5123 //  G  Owned by    |        |  v    add OptoReg::stack0())
5124 //  r   CALLER     |        |
5125 //  o     |        +--------+      pad to even-align allocators stack-slot
5126 //  w     V        |  pad0  |        numbers; owned by CALLER
5127 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5128 //  h     ^        |   in   |  5
5129 //        |        |  args  |  4   Holes in incoming args owned by SELF
5130 //  |     |        |        |  3
5131 //  |     |        +--------+
5132 //  V     |        | old out|      Empty on Intel, window on Sparc
5133 //        |    old |preserve|      Must be even aligned.
5134 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5135 //        |        |   in   |  3   area for Intel ret address
5136 //     Owned by    |preserve|      Empty on Sparc.
5137 //       SELF      +--------+
5138 //        |        |  pad2  |  2   pad to align old SP
5139 //        |        +--------+  1
5140 //        |        | locks  |  0
5141 //        |        +--------+----> OptoReg::stack0(), even aligned
5142 //        |        |  pad1  | 11   pad to align new SP
5143 //        |        +--------+
5144 //        |        |        | 10
5145 //        |        | spills |  9   spills
5146 //        V        |        |  8   (pad0 slot for callee)
5147 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5148 //        ^        |  out   |  7
5149 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5150 //     Owned by    +--------+
5151 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5152 //        |    new |preserve|      Must be even-aligned.
5153 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5154 //        |        |        |
5155 //
5156 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5157 //         known from SELF's arguments and the Java calling convention.
5158 //         Region 6-7 is determined per call site.
5159 // Note 2: If the calling convention leaves holes in the incoming argument
5160 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
5162 //         incoming area, as the Java calling convention is completely under
5163 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5165 //         varargs C calling conventions.
5166 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5167 //         even aligned with pad0 as needed.
5168 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5169 //           (the latter is true on Intel but is it false on AArch64?)
5170 //         region 6-11 is even aligned; it may be padded out more so that
5171 //         the region from SP to FP meets the minimum stack alignment.
5172 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5173 //         alignment.  Region 11, pad1, may be dynamically extended so that
5174 //         SP meets the minimum alignment.
5175 
5176 frame %{
5177   // What direction does stack grow in (assumed to be same for C & Java)
5178   stack_direction(TOWARDS_LOW);
5179 
5180   // These three registers define part of the calling convention
5181   // between compiled code and the interpreter.
5182 
5183   // Inline Cache Register or methodOop for I2C.
5184   inline_cache_reg(R12);
5185 
5186   // Method Oop Register when calling interpreter.
5187   interpreter_method_oop_reg(R12);
5188 
5189   // Number of stack slots consumed by locking an object
5190   sync_stack_slots(2);
5191 
5192   // Compiled code's Frame Pointer
5193   frame_pointer(R31);
5194 
5195   // Interpreter stores its frame pointer in a register which is
5196   // stored to the stack by I2CAdaptors.
5197   // I2CAdaptors convert from interpreted java to compiled java.
5198   interpreter_frame_pointer(R29);
5199 
5200   // Stack alignment requirement
5201   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
5202 
5203   // Number of stack slots between incoming argument block and the start of
5204   // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two words (four
  // 32-bit slots) for the return address and fp.
5207   // TODO think this is correct but check
5208   in_preserve_stack_slots(4);
5209 
5210   // Number of outgoing stack slots killed above the out_preserve_stack_slots
5211   // for calls to C.  Supports the var-args backing area for register parms.
5212   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
5213 
5214   // The after-PROLOG location of the return address.  Location of
5215   // return address specifies a type (REG or STACK) and a number
5216   // representing the register number (i.e. - use a register name) or
5217   // stack slot.
5218   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
5219   // Otherwise, it is above the locks and verification slot and alignment word
5220   // TODO this may well be correct but need to check why that - 2 is there
5221   // ppc port uses 0 but we definitely need to allow for fixed_slots
5222   // which folds in the space used for monitors
5223   return_addr(STACK - 2 +
5224               round_to((Compile::current()->in_preserve_stack_slots() +
5225                         Compile::current()->fixed_slots()),
5226                        stack_alignment_in_slots()));
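  // For example, assuming fixed_slots() == 0: round_to(4, 4) == 4, so the
  // return address is at STACK - 2 + 4 == STACK + 2, i.e. the saved lr
  // sitting just above the two slots of the saved fp.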
5227 
5228   // Body of function which returns an integer array locating
5229   // arguments either in registers or in stack slots.  Passed an array
5230   // of ideal registers called "sig" and a "length" count.  Stack-slot
5231   // offsets are based on outgoing arguments, i.e. a CALLER setting up
5232   // arguments for a CALLEE.  Incoming stack arguments are
5233   // automatically biased by the preserve_stack_slots field above.
5234 
5235   calling_convention
5236   %{
    // No difference between ingoing/outgoing, just pass false
5238     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
5239   %}
5240 
5241   c_calling_convention
5242   %{
5243     // This is obviously always outgoing
5244     (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
5245   %}
5246 
5247   // Location of compiled Java return values.  Same as C for now.
5248   return_value
5249   %{
5250     // TODO do we allow ideal_reg == Op_RegN???
5251     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
5252            "only return normal values");
5253 
5254     static const int lo[Op_RegL + 1] = { // enum name
5255       0,                                 // Op_Node
5256       0,                                 // Op_Set
5257       R0_num,                            // Op_RegN
5258       R0_num,                            // Op_RegI
5259       R0_num,                            // Op_RegP
5260       V0_num,                            // Op_RegF
5261       V0_num,                            // Op_RegD
5262       R0_num                             // Op_RegL
5263     };
5264 
5265     static const int hi[Op_RegL + 1] = { // enum name
5266       0,                                 // Op_Node
5267       0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
5269       OptoReg::Bad,                      // Op_RegI
5270       R0_H_num,                          // Op_RegP
5271       OptoReg::Bad,                      // Op_RegF
5272       V0_H_num,                          // Op_RegD
5273       R0_H_num                           // Op_RegL
5274     };
5275 
5276     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
5277   %}
5278 %}
5279 
5280 //----------ATTRIBUTES---------------------------------------------------------
5281 //----------Operand Attributes-------------------------------------------------
5282 op_attrib op_cost(1);        // Required cost attribute
5283 
5284 //----------Instruction Attributes---------------------------------------------
5285 ins_attrib ins_cost(INSN_COST); // Required cost attribute
5286 ins_attrib ins_size(32);        // Required size attribute (in bits)
5287 ins_attrib ins_short_branch(0); // Required flag: is this instruction
5288                                 // a non-matching short branch variant
5289                                 // of some long branch?
5290 ins_attrib ins_alignment(4);    // Required alignment attribute (must
5291                                 // be a power of 2) specifies the
5292                                 // alignment that some part of the
5293                                 // instruction (not necessarily the
5294                                 // start) requires.  If > 1, a
5295                                 // compute_padding() function must be
5296                                 // provided for the instruction
5297 
5298 //----------OPERANDS-----------------------------------------------------------
5299 // Operand definitions must precede instruction definitions for correct parsing
5300 // in the ADLC because operands constitute user defined types which are used in
5301 // instruction definitions.
5302 
5303 //----------Simple Operands----------------------------------------------------
5304 
5305 // Integer operands 32 bit
5306 // 32 bit immediate
5307 operand immI()
5308 %{
5309   match(ConI);
5310 
5311   op_cost(0);
5312   format %{ %}
5313   interface(CONST_INTER);
5314 %}
5315 
5316 // 32 bit zero
5317 operand immI0()
5318 %{
5319   predicate(n->get_int() == 0);
5320   match(ConI);
5321 
5322   op_cost(0);
5323   format %{ %}
5324   interface(CONST_INTER);
5325 %}
5326 
5327 // 32 bit unit increment
5328 operand immI_1()
5329 %{
5330   predicate(n->get_int() == 1);
5331   match(ConI);
5332 
5333   op_cost(0);
5334   format %{ %}
5335   interface(CONST_INTER);
5336 %}
5337 
5338 // 32 bit unit decrement
5339 operand immI_M1()
5340 %{
5341   predicate(n->get_int() == -1);
5342   match(ConI);
5343 
5344   op_cost(0);
5345   format %{ %}
5346   interface(CONST_INTER);
5347 %}
5348 
5349 operand immI_le_4()
5350 %{
5351   predicate(n->get_int() <= 4);
5352   match(ConI);
5353 
5354   op_cost(0);
5355   format %{ %}
5356   interface(CONST_INTER);
5357 %}
5358 
5359 operand immI_31()
5360 %{
5361   predicate(n->get_int() == 31);
5362   match(ConI);
5363 
5364   op_cost(0);
5365   format %{ %}
5366   interface(CONST_INTER);
5367 %}
5368 
5369 operand immI_8()
5370 %{
5371   predicate(n->get_int() == 8);
5372   match(ConI);
5373 
5374   op_cost(0);
5375   format %{ %}
5376   interface(CONST_INTER);
5377 %}
5378 
5379 operand immI_16()
5380 %{
5381   predicate(n->get_int() == 16);
5382   match(ConI);
5383 
5384   op_cost(0);
5385   format %{ %}
5386   interface(CONST_INTER);
5387 %}
5388 
5389 operand immI_24()
5390 %{
5391   predicate(n->get_int() == 24);
5392   match(ConI);
5393 
5394   op_cost(0);
5395   format %{ %}
5396   interface(CONST_INTER);
5397 %}
5398 
5399 operand immI_32()
5400 %{
5401   predicate(n->get_int() == 32);
5402   match(ConI);
5403 
5404   op_cost(0);
5405   format %{ %}
5406   interface(CONST_INTER);
5407 %}
5408 
5409 operand immI_48()
5410 %{
5411   predicate(n->get_int() == 48);
5412   match(ConI);
5413 
5414   op_cost(0);
5415   format %{ %}
5416   interface(CONST_INTER);
5417 %}
5418 
5419 operand immI_56()
5420 %{
5421   predicate(n->get_int() == 56);
5422   match(ConI);
5423 
5424   op_cost(0);
5425   format %{ %}
5426   interface(CONST_INTER);
5427 %}
5428 
5429 operand immI_64()
5430 %{
5431   predicate(n->get_int() == 64);
5432   match(ConI);
5433 
5434   op_cost(0);
5435   format %{ %}
5436   interface(CONST_INTER);
5437 %}
5438 
5439 operand immI_255()
5440 %{
5441   predicate(n->get_int() == 255);
5442   match(ConI);
5443 
5444   op_cost(0);
5445   format %{ %}
5446   interface(CONST_INTER);
5447 %}
5448 
5449 operand immI_65535()
5450 %{
5451   predicate(n->get_int() == 65535);
5452   match(ConI);
5453 
5454   op_cost(0);
5455   format %{ %}
5456   interface(CONST_INTER);
5457 %}
5458 
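// Note: the two operands below match ConI even though they are named
// immL_*; shift and rotate counts for long operations are int constants
// in the ideal graph, so the L refers to the operation fed, not to the
// type of the constant itself.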
5459 operand immL_63()
5460 %{
5461   predicate(n->get_int() == 63);
5462   match(ConI);
5463 
5464   op_cost(0);
5465   format %{ %}
5466   interface(CONST_INTER);
5467 %}
5468 
5469 operand immL_255()
5470 %{
5471   predicate(n->get_int() == 255);
5472   match(ConI);
5473 
5474   op_cost(0);
5475   format %{ %}
5476   interface(CONST_INTER);
5477 %}
5478 
5479 operand immL_65535()
5480 %{
5481   predicate(n->get_long() == 65535L);
5482   match(ConL);
5483 
5484   op_cost(0);
5485   format %{ %}
5486   interface(CONST_INTER);
5487 %}
5488 
5489 operand immL_4294967295()
5490 %{
5491   predicate(n->get_long() == 4294967295L);
5492   match(ConL);
5493 
5494   op_cost(0);
5495   format %{ %}
5496   interface(CONST_INTER);
5497 %}
5498 
5499 operand immL_bitmask()
5500 %{
5501   predicate(((n->get_long() & 0xc000000000000000l) == 0)
5502             && is_power_of_2(n->get_long() + 1));
5503   match(ConL);
5504 
5505   op_cost(0);
5506   format %{ %}
5507   interface(CONST_INTER);
5508 %}
5509 
5510 operand immI_bitmask()
5511 %{
5512   predicate(((n->get_int() & 0xc0000000) == 0)
5513             && is_power_of_2(n->get_int() + 1));
5514   match(ConI);
5515 
5516   op_cost(0);
5517   format %{ %}
5518   interface(CONST_INTER);
5519 %}
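
// For illustration: the bitmask operands accept masks of the form 2^k - 1
// with the top two bits clear, e.g. 0xff or 0x3fffffff for immI_bitmask;
// they are intended for the bit-field extract (ubfx-style) match rules.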
5520 
5521 // Scale values for scaled offset addressing modes (up to long but not quad)
5522 operand immIScale()
5523 %{
5524   predicate(0 <= n->get_int() && (n->get_int() <= 3));
5525   match(ConI);
5526 
5527   op_cost(0);
5528   format %{ %}
5529   interface(CONST_INTER);
5530 %}
5531 
5532 // 26 bit signed offset -- for pc-relative branches
5533 operand immI26()
5534 %{
5535   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
5536   match(ConI);
5537 
5538   op_cost(0);
5539   format %{ %}
5540   interface(CONST_INTER);
5541 %}
5542 
5543 // 19 bit signed offset -- for pc-relative loads
5544 operand immI19()
5545 %{
5546   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
5547   match(ConI);
5548 
5549   op_cost(0);
5550   format %{ %}
5551   interface(CONST_INTER);
5552 %}
5553 
5554 // 12 bit unsigned offset -- for base plus immediate loads
5555 operand immIU12()
5556 %{
5557   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
5558   match(ConI);
5559 
5560   op_cost(0);
5561   format %{ %}
5562   interface(CONST_INTER);
5563 %}
5564 
5565 operand immLU12()
5566 %{
5567   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
5568   match(ConL);
5569 
5570   op_cost(0);
5571   format %{ %}
5572   interface(CONST_INTER);
5573 %}
5574 
5575 // Offset for scaled or unscaled immediate loads and stores
5576 operand immIOffset()
5577 %{
5578   predicate(Address::offset_ok_for_immed(n->get_int()));
5579   match(ConI);
5580 
5581   op_cost(0);
5582   format %{ %}
5583   interface(CONST_INTER);
5584 %}
5585 
5586 operand immLoffset()
5587 %{
5588   predicate(Address::offset_ok_for_immed(n->get_long()));
5589   match(ConL);
5590 
5591   op_cost(0);
5592   format %{ %}
5593   interface(CONST_INTER);
5594 %}
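
// A sketch of what offset_ok_for_immed accepts, assuming the usual AArch64
// load/store forms: either a 9-bit signed unscaled offset (-256..255) or a
// 12-bit unsigned offset scaled by the access size; e.g. 257 is encodable
// for an ldr/str but -257 is not.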
5595 
5596 // 32 bit integer valid for add sub immediate
5597 operand immIAddSub()
5598 %{
5599   predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
5600   match(ConI);
5601   op_cost(0);
5602   format %{ %}
5603   interface(CONST_INTER);
5604 %}
5605 
5606 // 32 bit unsigned integer valid for logical immediate
5607 // TODO -- check this is right when e.g the mask is 0x80000000
5608 operand immILog()
5609 %{
5610   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
5611   match(ConI);
5612 
5613   op_cost(0);
5614   format %{ %}
5615   interface(CONST_INTER);
5616 %}
5617 
5618 // Integer operands 64 bit
5619 // 64 bit immediate
5620 operand immL()
5621 %{
5622   match(ConL);
5623 
5624   op_cost(0);
5625   format %{ %}
5626   interface(CONST_INTER);
5627 %}
5628 
5629 // 64 bit zero
5630 operand immL0()
5631 %{
5632   predicate(n->get_long() == 0);
5633   match(ConL);
5634 
5635   op_cost(0);
5636   format %{ %}
5637   interface(CONST_INTER);
5638 %}
5639 
5640 // 64 bit unit increment
5641 operand immL_1()
5642 %{
5643   predicate(n->get_long() == 1);
5644   match(ConL);
5645 
5646   op_cost(0);
5647   format %{ %}
5648   interface(CONST_INTER);
5649 %}
5650 
5651 // 64 bit unit decrement
5652 operand immL_M1()
5653 %{
5654   predicate(n->get_long() == -1);
5655   match(ConL);
5656 
5657   op_cost(0);
5658   format %{ %}
5659   interface(CONST_INTER);
5660 %}
5661 
5662 // 32 bit offset of pc in thread anchor
5663 
5664 operand immL_pc_off()
5665 %{
5666   predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
5667                              in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
5668   match(ConL);
5669 
5670   op_cost(0);
5671   format %{ %}
5672   interface(CONST_INTER);
5673 %}
5674 
5675 // 64 bit integer valid for add sub immediate
5676 operand immLAddSub()
5677 %{
5678   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
5679   match(ConL);
5680   op_cost(0);
5681   format %{ %}
5682   interface(CONST_INTER);
5683 %}
5684 
5685 // 64 bit integer valid for logical immediate
5686 operand immLLog()
5687 %{
5688   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
5689   match(ConL);
5690   op_cost(0);
5691   format %{ %}
5692   interface(CONST_INTER);
5693 %}
5694 
5695 // Long Immediate: low 32-bit mask
5696 operand immL_32bits()
5697 %{
5698   predicate(n->get_long() == 0xFFFFFFFFL);
5699   match(ConL);
5700   op_cost(0);
5701   format %{ %}
5702   interface(CONST_INTER);
5703 %}
5704 
5705 // Pointer operands
5706 // Pointer Immediate
5707 operand immP()
5708 %{
5709   match(ConP);
5710 
5711   op_cost(0);
5712   format %{ %}
5713   interface(CONST_INTER);
5714 %}
5715 
5716 // NULL Pointer Immediate
5717 operand immP0()
5718 %{
5719   predicate(n->get_ptr() == 0);
5720   match(ConP);
5721 
5722   op_cost(0);
5723   format %{ %}
5724   interface(CONST_INTER);
5725 %}
5726 
5727 // Pointer Immediate One
5728 // this is used in object initialization (initial object header)
5729 operand immP_1()
5730 %{
5731   predicate(n->get_ptr() == 1);
5732   match(ConP);
5733 
5734   op_cost(0);
5735   format %{ %}
5736   interface(CONST_INTER);
5737 %}
5738 
5739 // Polling Page Pointer Immediate
5740 operand immPollPage()
5741 %{
5742   predicate((address)n->get_ptr() == os::get_polling_page());
5743   match(ConP);
5744 
5745   op_cost(0);
5746   format %{ %}
5747   interface(CONST_INTER);
5748 %}
5749 
5750 // Card Table Byte Map Base
5751 operand immByteMapBase()
5752 %{
5753   // Get base of card map
5754   predicate((jbyte*)n->get_ptr() ==
5755         ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
5756   match(ConP);
5757 
5758   op_cost(0);
5759   format %{ %}
5760   interface(CONST_INTER);
5761 %}
5762 
5763 // Pointer Immediate Minus One
5764 // this is used when we want to write the current PC to the thread anchor
5765 operand immP_M1()
5766 %{
5767   predicate(n->get_ptr() == -1);
5768   match(ConP);
5769 
5770   op_cost(0);
5771   format %{ %}
5772   interface(CONST_INTER);
5773 %}
5774 
5775 // Pointer Immediate Minus Two
5776 // this is used when we want to write the current PC to the thread anchor
5777 operand immP_M2()
5778 %{
5779   predicate(n->get_ptr() == -2);
5780   match(ConP);
5781 
5782   op_cost(0);
5783   format %{ %}
5784   interface(CONST_INTER);
5785 %}
5786 
5787 // Float and Double operands
5788 // Double Immediate
5789 operand immD()
5790 %{
5791   match(ConD);
5792   op_cost(0);
5793   format %{ %}
5794   interface(CONST_INTER);
5795 %}
5796 
5797 // Double Immediate: +0.0d
5798 operand immD0()
5799 %{
5800   predicate(jlong_cast(n->getd()) == 0);
5801   match(ConD);
5802 
5803   op_cost(0);
5804   format %{ %}
5805   interface(CONST_INTER);
5806 %}
5807 
// Double Immediate: encodable as an 8-bit packed constant (FMOV immediate).
5809 operand immDPacked()
5810 %{
5811   predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
5812   match(ConD);
5813   op_cost(0);
5814   format %{ %}
5815   interface(CONST_INTER);
5816 %}
5817 
5818 // Float Immediate
5819 operand immF()
5820 %{
5821   match(ConF);
5822   op_cost(0);
5823   format %{ %}
5824   interface(CONST_INTER);
5825 %}
5826 
5827 // Float Immediate: +0.0f.
5828 operand immF0()
5829 %{
5830   predicate(jint_cast(n->getf()) == 0);
5831   match(ConF);
5832 
5833   op_cost(0);
5834   format %{ %}
5835   interface(CONST_INTER);
5836 %}
5837 
// Float Immediate: encodable as an 8-bit packed constant (FMOV immediate).
5839 operand immFPacked()
5840 %{
5841   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
5842   match(ConF);
5843   op_cost(0);
5844   format %{ %}
5845   interface(CONST_INTER);
5846 %}
5847 
5848 // Narrow pointer operands
5849 // Narrow Pointer Immediate
5850 operand immN()
5851 %{
5852   match(ConN);
5853 
5854   op_cost(0);
5855   format %{ %}
5856   interface(CONST_INTER);
5857 %}
5858 
5859 // Narrow NULL Pointer Immediate
5860 operand immN0()
5861 %{
5862   predicate(n->get_narrowcon() == 0);
5863   match(ConN);
5864 
5865   op_cost(0);
5866   format %{ %}
5867   interface(CONST_INTER);
5868 %}
5869 
5870 operand immNKlass()
5871 %{
5872   match(ConNKlass);
5873 
5874   op_cost(0);
5875   format %{ %}
5876   interface(CONST_INTER);
5877 %}
5878 
5879 // Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
5881 operand iRegI()
5882 %{
5883   constraint(ALLOC_IN_RC(any_reg32));
5884   match(RegI);
5885   match(iRegINoSp);
5886   op_cost(0);
5887   format %{ %}
5888   interface(REG_INTER);
5889 %}
5890 
5891 // Integer 32 bit Register not Special
5892 operand iRegINoSp()
5893 %{
5894   constraint(ALLOC_IN_RC(no_special_reg32));
5895   match(RegI);
5896   op_cost(0);
5897   format %{ %}
5898   interface(REG_INTER);
5899 %}
5900 
5901 // Integer 64 bit Register Operands
5902 // Integer 64 bit Register (includes SP)
5903 operand iRegL()
5904 %{
5905   constraint(ALLOC_IN_RC(any_reg));
5906   match(RegL);
5907   match(iRegLNoSp);
5908   op_cost(0);
5909   format %{ %}
5910   interface(REG_INTER);
5911 %}
5912 
5913 // Integer 64 bit Register not Special
5914 operand iRegLNoSp()
5915 %{
5916   constraint(ALLOC_IN_RC(no_special_reg));
5917   match(RegL);
5918   format %{ %}
5919   interface(REG_INTER);
5920 %}
5921 
5922 // Pointer Register Operands
5923 // Pointer Register
5924 operand iRegP()
5925 %{
5926   constraint(ALLOC_IN_RC(ptr_reg));
5927   match(RegP);
5928   match(iRegPNoSp);
5929   match(iRegP_R0);
5930   //match(iRegP_R2);
5931   //match(iRegP_R4);
5932   //match(iRegP_R5);
5933   match(thread_RegP);
5934   op_cost(0);
5935   format %{ %}
5936   interface(REG_INTER);
5937 %}
5938 
5939 // Pointer 64 bit Register not Special
5940 operand iRegPNoSp()
5941 %{
5942   constraint(ALLOC_IN_RC(no_special_ptr_reg));
5943   match(RegP);
5944   // match(iRegP);
5945   // match(iRegP_R0);
5946   // match(iRegP_R2);
5947   // match(iRegP_R4);
5948   // match(iRegP_R5);
5949   // match(thread_RegP);
5950   op_cost(0);
5951   format %{ %}
5952   interface(REG_INTER);
5953 %}
5954 
5955 // Pointer 64 bit Register R0 only
5956 operand iRegP_R0()
5957 %{
5958   constraint(ALLOC_IN_RC(r0_reg));
5959   match(RegP);
5960   // match(iRegP);
5961   match(iRegPNoSp);
5962   op_cost(0);
5963   format %{ %}
5964   interface(REG_INTER);
5965 %}
5966 
5967 // Pointer 64 bit Register R1 only
5968 operand iRegP_R1()
5969 %{
5970   constraint(ALLOC_IN_RC(r1_reg));
5971   match(RegP);
5972   // match(iRegP);
5973   match(iRegPNoSp);
5974   op_cost(0);
5975   format %{ %}
5976   interface(REG_INTER);
5977 %}
5978 
5979 // Pointer 64 bit Register R2 only
5980 operand iRegP_R2()
5981 %{
5982   constraint(ALLOC_IN_RC(r2_reg));
5983   match(RegP);
5984   // match(iRegP);
5985   match(iRegPNoSp);
5986   op_cost(0);
5987   format %{ %}
5988   interface(REG_INTER);
5989 %}
5990 
5991 // Pointer 64 bit Register R3 only
5992 operand iRegP_R3()
5993 %{
5994   constraint(ALLOC_IN_RC(r3_reg));
5995   match(RegP);
5996   // match(iRegP);
5997   match(iRegPNoSp);
5998   op_cost(0);
5999   format %{ %}
6000   interface(REG_INTER);
6001 %}
6002 
6003 // Pointer 64 bit Register R4 only
6004 operand iRegP_R4()
6005 %{
6006   constraint(ALLOC_IN_RC(r4_reg));
6007   match(RegP);
6008   // match(iRegP);
6009   match(iRegPNoSp);
6010   op_cost(0);
6011   format %{ %}
6012   interface(REG_INTER);
6013 %}
6014 
6015 // Pointer 64 bit Register R5 only
6016 operand iRegP_R5()
6017 %{
6018   constraint(ALLOC_IN_RC(r5_reg));
6019   match(RegP);
6020   // match(iRegP);
6021   match(iRegPNoSp);
6022   op_cost(0);
6023   format %{ %}
6024   interface(REG_INTER);
6025 %}
6026 
6027 // Pointer 64 bit Register R10 only
6028 operand iRegP_R10()
6029 %{
6030   constraint(ALLOC_IN_RC(r10_reg));
6031   match(RegP);
6032   // match(iRegP);
6033   match(iRegPNoSp);
6034   op_cost(0);
6035   format %{ %}
6036   interface(REG_INTER);
6037 %}
6038 
6039 // Long 64 bit Register R11 only
6040 operand iRegL_R11()
6041 %{
6042   constraint(ALLOC_IN_RC(r11_reg));
6043   match(RegL);
6044   match(iRegLNoSp);
6045   op_cost(0);
6046   format %{ %}
6047   interface(REG_INTER);
6048 %}
6049 
6050 // Pointer 64 bit Register FP only
6051 operand iRegP_FP()
6052 %{
6053   constraint(ALLOC_IN_RC(fp_reg));
6054   match(RegP);
6055   // match(iRegP);
6056   op_cost(0);
6057   format %{ %}
6058   interface(REG_INTER);
6059 %}
6060 
6061 // Register R0 only
6062 operand iRegI_R0()
6063 %{
6064   constraint(ALLOC_IN_RC(int_r0_reg));
6065   match(RegI);
6066   match(iRegINoSp);
6067   op_cost(0);
6068   format %{ %}
6069   interface(REG_INTER);
6070 %}
6071 
6072 // Register R2 only
6073 operand iRegI_R2()
6074 %{
6075   constraint(ALLOC_IN_RC(int_r2_reg));
6076   match(RegI);
6077   match(iRegINoSp);
6078   op_cost(0);
6079   format %{ %}
6080   interface(REG_INTER);
6081 %}
6082 
6083 // Register R3 only
6084 operand iRegI_R3()
6085 %{
6086   constraint(ALLOC_IN_RC(int_r3_reg));
6087   match(RegI);
6088   match(iRegINoSp);
6089   op_cost(0);
6090   format %{ %}
6091   interface(REG_INTER);
6092 %}
6093 
6094 
// Register R4 only
6096 operand iRegI_R4()
6097 %{
6098   constraint(ALLOC_IN_RC(int_r4_reg));
6099   match(RegI);
6100   match(iRegINoSp);
6101   op_cost(0);
6102   format %{ %}
6103   interface(REG_INTER);
6104 %}
6105 
6106 
6107 // Pointer Register Operands
6108 // Narrow Pointer Register
6109 operand iRegN()
6110 %{
6111   constraint(ALLOC_IN_RC(any_reg32));
6112   match(RegN);
6113   match(iRegNNoSp);
6114   op_cost(0);
6115   format %{ %}
6116   interface(REG_INTER);
6117 %}
6118 
// Narrow Pointer Register not Special
6120 operand iRegNNoSp()
6121 %{
6122   constraint(ALLOC_IN_RC(no_special_reg32));
6123   match(RegN);
6124   op_cost(0);
6125   format %{ %}
6126   interface(REG_INTER);
6127 %}
6128 
6129 // heap base register -- used for encoding immN0
6130 
6131 operand iRegIHeapbase()
6132 %{
6133   constraint(ALLOC_IN_RC(heapbase_reg));
6134   match(RegI);
6135   op_cost(0);
6136   format %{ %}
6137   interface(REG_INTER);
6138 %}
6139 
6140 // Float Register
6141 // Float register operands
6142 operand vRegF()
6143 %{
6144   constraint(ALLOC_IN_RC(float_reg));
6145   match(RegF);
6146 
6147   op_cost(0);
6148   format %{ %}
6149   interface(REG_INTER);
6150 %}
6151 
6152 // Double Register
6153 // Double register operands
6154 operand vRegD()
6155 %{
6156   constraint(ALLOC_IN_RC(double_reg));
6157   match(RegD);
6158 
6159   op_cost(0);
6160   format %{ %}
6161   interface(REG_INTER);
6162 %}
6163 
6164 operand vecD()
6165 %{
6166   constraint(ALLOC_IN_RC(vectord_reg));
6167   match(VecD);
6168 
6169   op_cost(0);
6170   format %{ %}
6171   interface(REG_INTER);
6172 %}
6173 
6174 operand vecX()
6175 %{
6176   constraint(ALLOC_IN_RC(vectorx_reg));
6177   match(VecX);
6178 
6179   op_cost(0);
6180   format %{ %}
6181   interface(REG_INTER);
6182 %}
6183 
6184 operand vRegD_V0()
6185 %{
6186   constraint(ALLOC_IN_RC(v0_reg));
6187   match(RegD);
6188   op_cost(0);
6189   format %{ %}
6190   interface(REG_INTER);
6191 %}
6192 
6193 operand vRegD_V1()
6194 %{
6195   constraint(ALLOC_IN_RC(v1_reg));
6196   match(RegD);
6197   op_cost(0);
6198   format %{ %}
6199   interface(REG_INTER);
6200 %}
6201 
6202 operand vRegD_V2()
6203 %{
6204   constraint(ALLOC_IN_RC(v2_reg));
6205   match(RegD);
6206   op_cost(0);
6207   format %{ %}
6208   interface(REG_INTER);
6209 %}
6210 
6211 operand vRegD_V3()
6212 %{
6213   constraint(ALLOC_IN_RC(v3_reg));
6214   match(RegD);
6215   op_cost(0);
6216   format %{ %}
6217   interface(REG_INTER);
6218 %}
6219 
6220 // Flags register, used as output of signed compare instructions
6221 
// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
6224 // that ordered inequality tests use GT, GE, LT or LE none of which
6225 // pass through cases where the result is unordered i.e. one or both
6226 // inputs to the compare is a NaN. this means that the ideal code can
6227 // replace e.g. a GT with an LE and not end up capturing the NaN case
6228 // (where the comparison should always fail). EQ and NE tests are
6229 // always generated in ideal code so that unordered folds into the NE
6230 // case, matching the behaviour of AArch64 NE.
6231 //
6232 // This differs from x86 where the outputs of FP compares use a
6233 // special FP flags registers and where compares based on this
6234 // register are distinguished into ordered inequalities (cmpOpUCF) and
6235 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6236 // to explicitly handle the unordered case in branches. x86 also has
6237 // to include extra CMoveX rules to accept a cmpOpUCF input.
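//
// For example, with x == NaN every ordered test (x < 0.0, x <= 0.0,
// x > 0.0, x >= 0.0) is false under IEEE 754, while x != 0.0 is true;
// the scheme above preserves exactly these semantics in compiled code.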
6238 
6239 operand rFlagsReg()
6240 %{
6241   constraint(ALLOC_IN_RC(int_flags));
6242   match(RegFlags);
6243 
6244   op_cost(0);
6245   format %{ "RFLAGS" %}
6246   interface(REG_INTER);
6247 %}
6248 
6249 // Flags register, used as output of unsigned compare instructions
6250 operand rFlagsRegU()
6251 %{
6252   constraint(ALLOC_IN_RC(int_flags));
6253   match(RegFlags);
6254 
6255   op_cost(0);
6256   format %{ "RFLAGSU" %}
6257   interface(REG_INTER);
6258 %}
6259 
6260 // Special Registers
6261 
6262 // Method Register
6263 operand inline_cache_RegP(iRegP reg)
6264 %{
6265   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
6266   match(reg);
6267   match(iRegPNoSp);
6268   op_cost(0);
6269   format %{ %}
6270   interface(REG_INTER);
6271 %}
6272 
6273 operand interpreter_method_oop_RegP(iRegP reg)
6274 %{
6275   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
6276   match(reg);
6277   match(iRegPNoSp);
6278   op_cost(0);
6279   format %{ %}
6280   interface(REG_INTER);
6281 %}
6282 
6283 // Thread Register
6284 operand thread_RegP(iRegP reg)
6285 %{
6286   constraint(ALLOC_IN_RC(thread_reg)); // link_reg
6287   match(reg);
6288   op_cost(0);
6289   format %{ %}
6290   interface(REG_INTER);
6291 %}
6292 
6293 operand lr_RegP(iRegP reg)
6294 %{
6295   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
6296   match(reg);
6297   op_cost(0);
6298   format %{ %}
6299   interface(REG_INTER);
6300 %}
6301 
6302 //----------Memory Operands----------------------------------------------------
6303 
6304 operand indirect(iRegP reg)
6305 %{
6306   constraint(ALLOC_IN_RC(ptr_reg));
6307   match(reg);
6308   op_cost(0);
6309   format %{ "[$reg]" %}
6310   interface(MEMORY_INTER) %{
6311     base($reg);
6312     index(0xffffffff);
6313     scale(0x0);
6314     disp(0x0);
6315   %}
6316 %}
6317 
6318 operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
6319 %{
6320   constraint(ALLOC_IN_RC(ptr_reg));
6321   match(AddP (AddP reg (LShiftL lreg scale)) off);
6322   op_cost(INSN_COST);
6323   format %{ "$reg, $lreg lsl($scale), $off" %}
6324   interface(MEMORY_INTER) %{
6325     base($reg);
6326     index($lreg);
6327     scale($scale);
6328     disp($off);
6329   %}
6330 %}
6331 
6332 operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
6333 %{
6334   constraint(ALLOC_IN_RC(ptr_reg));
6335   match(AddP (AddP reg (LShiftL lreg scale)) off);
6336   op_cost(INSN_COST);
6337   format %{ "$reg, $lreg lsl($scale), $off" %}
6338   interface(MEMORY_INTER) %{
6339     base($reg);
6340     index($lreg);
6341     scale($scale);
6342     disp($off);
6343   %}
6344 %}
6345 
6346 operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
6347 %{
6348   constraint(ALLOC_IN_RC(ptr_reg));
6349   match(AddP (AddP reg (ConvI2L ireg)) off);
6350   op_cost(INSN_COST);
6351   format %{ "$reg, $ireg, $off I2L" %}
6352   interface(MEMORY_INTER) %{
6353     base($reg);
6354     index($ireg);
6355     scale(0x0);
6356     disp($off);
6357   %}
6358 %}
6359 
6360 operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
6361 %{
6362   constraint(ALLOC_IN_RC(ptr_reg));
6363   match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
6364   op_cost(INSN_COST);
6365   format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
6366   interface(MEMORY_INTER) %{
6367     base($reg);
6368     index($ireg);
6369     scale($scale);
6370     disp($off);
6371   %}
6372 %}
6373 
6374 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
6375 %{
6376   constraint(ALLOC_IN_RC(ptr_reg));
6377   match(AddP reg (LShiftL (ConvI2L ireg) scale));
6378   op_cost(0);
6379   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
6380   interface(MEMORY_INTER) %{
6381     base($reg);
6382     index($ireg);
6383     scale($scale);
6384     disp(0x0);
6385   %}
6386 %}
6387 
6388 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
6389 %{
6390   constraint(ALLOC_IN_RC(ptr_reg));
6391   match(AddP reg (LShiftL lreg scale));
6392   op_cost(0);
6393   format %{ "$reg, $lreg lsl($scale)" %}
6394   interface(MEMORY_INTER) %{
6395     base($reg);
6396     index($lreg);
6397     scale($scale);
6398     disp(0x0);
6399   %}
6400 %}
6401 
6402 operand indIndex(iRegP reg, iRegL lreg)
6403 %{
6404   constraint(ALLOC_IN_RC(ptr_reg));
6405   match(AddP reg lreg);
6406   op_cost(0);
6407   format %{ "$reg, $lreg" %}
6408   interface(MEMORY_INTER) %{
6409     base($reg);
6410     index($lreg);
6411     scale(0x0);
6412     disp(0x0);
6413   %}
6414 %}
6415 
6416 operand indOffI(iRegP reg, immIOffset off)
6417 %{
6418   constraint(ALLOC_IN_RC(ptr_reg));
6419   match(AddP reg off);
6420   op_cost(0);
6421   format %{ "[$reg, $off]" %}
6422   interface(MEMORY_INTER) %{
6423     base($reg);
6424     index(0xffffffff);
6425     scale(0x0);
6426     disp($off);
6427   %}
6428 %}
6429 
6430 operand indOffL(iRegP reg, immLoffset off)
6431 %{
6432   constraint(ALLOC_IN_RC(ptr_reg));
6433   match(AddP reg off);
6434   op_cost(0);
6435   format %{ "[$reg, $off]" %}
6436   interface(MEMORY_INTER) %{
6437     base($reg);
6438     index(0xffffffff);
6439     scale(0x0);
6440     disp($off);
6441   %}
6442 %}
6443 
6444 
6445 operand indirectN(iRegN reg)
6446 %{
6447   predicate(Universe::narrow_oop_shift() == 0);
6448   constraint(ALLOC_IN_RC(ptr_reg));
6449   match(DecodeN reg);
6450   op_cost(0);
6451   format %{ "[$reg]\t# narrow" %}
6452   interface(MEMORY_INTER) %{
6453     base($reg);
6454     index(0xffffffff);
6455     scale(0x0);
6456     disp(0x0);
6457   %}
6458 %}
6459 
6460 operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
6461 %{
6462   predicate(Universe::narrow_oop_shift() == 0);
6463   constraint(ALLOC_IN_RC(ptr_reg));
6464   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
6465   op_cost(0);
6466   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
6467   interface(MEMORY_INTER) %{
6468     base($reg);
6469     index($lreg);
6470     scale($scale);
6471     disp($off);
6472   %}
6473 %}
6474 
6475 operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
6476 %{
6477   predicate(Universe::narrow_oop_shift() == 0);
6478   constraint(ALLOC_IN_RC(ptr_reg));
6479   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
6480   op_cost(INSN_COST);
6481   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
6482   interface(MEMORY_INTER) %{
6483     base($reg);
6484     index($lreg);
6485     scale($scale);
6486     disp($off);
6487   %}
6488 %}
6489 
6490 operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
6491 %{
6492   predicate(Universe::narrow_oop_shift() == 0);
6493   constraint(ALLOC_IN_RC(ptr_reg));
6494   match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
6495   op_cost(INSN_COST);
6496   format %{ "$reg, $ireg, $off I2L\t# narrow" %}
6497   interface(MEMORY_INTER) %{
6498     base($reg);
6499     index($ireg);
6500     scale(0x0);
6501     disp($off);
6502   %}
6503 %}
6504 
6505 operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
6506 %{
6507   predicate(Universe::narrow_oop_shift() == 0);
6508   constraint(ALLOC_IN_RC(ptr_reg));
6509   match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
6510   op_cost(INSN_COST);
6511   format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
6512   interface(MEMORY_INTER) %{
6513     base($reg);
6514     index($ireg);
6515     scale($scale);
6516     disp($off);
6517   %}
6518 %}
6519 
6520 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
6521 %{
6522   predicate(Universe::narrow_oop_shift() == 0);
6523   constraint(ALLOC_IN_RC(ptr_reg));
6524   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
6525   op_cost(0);
6526   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
6527   interface(MEMORY_INTER) %{
6528     base($reg);
6529     index($ireg);
6530     scale($scale);
6531     disp(0x0);
6532   %}
6533 %}
6534 
6535 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
6536 %{
6537   predicate(Universe::narrow_oop_shift() == 0);
6538   constraint(ALLOC_IN_RC(ptr_reg));
6539   match(AddP (DecodeN reg) (LShiftL lreg scale));
6540   op_cost(0);
6541   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
6542   interface(MEMORY_INTER) %{
6543     base($reg);
6544     index($lreg);
6545     scale($scale);
6546     disp(0x0);
6547   %}
6548 %}
6549 
6550 operand indIndexN(iRegN reg, iRegL lreg)
6551 %{
6552   predicate(Universe::narrow_oop_shift() == 0);
6553   constraint(ALLOC_IN_RC(ptr_reg));
6554   match(AddP (DecodeN reg) lreg);
6555   op_cost(0);
6556   format %{ "$reg, $lreg\t# narrow" %}
6557   interface(MEMORY_INTER) %{
6558     base($reg);
6559     index($lreg);
6560     scale(0x0);
6561     disp(0x0);
6562   %}
6563 %}
6564 
6565 operand indOffIN(iRegN reg, immIOffset off)
6566 %{
6567   predicate(Universe::narrow_oop_shift() == 0);
6568   constraint(ALLOC_IN_RC(ptr_reg));
6569   match(AddP (DecodeN reg) off);
6570   op_cost(0);
6571   format %{ "[$reg, $off]\t# narrow" %}
6572   interface(MEMORY_INTER) %{
6573     base($reg);
6574     index(0xffffffff);
6575     scale(0x0);
6576     disp($off);
6577   %}
6578 %}
6579 
6580 operand indOffLN(iRegN reg, immLoffset off)
6581 %{
6582   predicate(Universe::narrow_oop_shift() == 0);
6583   constraint(ALLOC_IN_RC(ptr_reg));
6584   match(AddP (DecodeN reg) off);
6585   op_cost(0);
6586   format %{ "[$reg, $off]\t# narrow" %}
6587   interface(MEMORY_INTER) %{
6588     base($reg);
6589     index(0xffffffff);
6590     scale(0x0);
6591     disp($off);
6592   %}
6593 %}
6594 
6595 
6596 
6597 // AArch64 opto stubs need to write to the pc slot in the thread anchor
6598 operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
6599 %{
6600   constraint(ALLOC_IN_RC(ptr_reg));
6601   match(AddP reg off);
6602   op_cost(0);
6603   format %{ "[$reg, $off]" %}
6604   interface(MEMORY_INTER) %{
6605     base($reg);
6606     index(0xffffffff);
6607     scale(0x0);
6608     disp($off);
6609   %}
6610 %}
6611 
6612 //----------Special Memory Operands--------------------------------------------
6613 // Stack Slot Operand - This operand is used for loading and storing temporary
6614 //                      values on the stack where a match requires a value to
6615 //                      flow through memory.
6616 operand stackSlotP(sRegP reg)
6617 %{
6618   constraint(ALLOC_IN_RC(stack_slots));
6619   op_cost(100);
6620   // No match rule because this operand is only generated in matching
6621   // match(RegP);
6622   format %{ "[$reg]" %}
6623   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6625     index(0x0);  // No Index
6626     scale(0x0);  // No Scale
6627     disp($reg);  // Stack Offset
6628   %}
6629 %}
6630 
6631 operand stackSlotI(sRegI reg)
6632 %{
6633   constraint(ALLOC_IN_RC(stack_slots));
6634   // No match rule because this operand is only generated in matching
6635   // match(RegI);
6636   format %{ "[$reg]" %}
6637   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6639     index(0x0);  // No Index
6640     scale(0x0);  // No Scale
6641     disp($reg);  // Stack Offset
6642   %}
6643 %}
6644 
6645 operand stackSlotF(sRegF reg)
6646 %{
6647   constraint(ALLOC_IN_RC(stack_slots));
6648   // No match rule because this operand is only generated in matching
6649   // match(RegF);
6650   format %{ "[$reg]" %}
6651   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6653     index(0x0);  // No Index
6654     scale(0x0);  // No Scale
6655     disp($reg);  // Stack Offset
6656   %}
6657 %}
6658 
6659 operand stackSlotD(sRegD reg)
6660 %{
6661   constraint(ALLOC_IN_RC(stack_slots));
6662   // No match rule because this operand is only generated in matching
6663   // match(RegD);
6664   format %{ "[$reg]" %}
6665   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6667     index(0x0);  // No Index
6668     scale(0x0);  // No Scale
6669     disp($reg);  // Stack Offset
6670   %}
6671 %}
6672 
6673 operand stackSlotL(sRegL reg)
6674 %{
6675   constraint(ALLOC_IN_RC(stack_slots));
6676   // No match rule because this operand is only generated in matching
6677   // match(RegL);
6678   format %{ "[$reg]" %}
6679   interface(MEMORY_INTER) %{
    base(0x1e);  // SP
6681     index(0x0);  // No Index
6682     scale(0x0);  // No Scale
6683     disp($reg);  // Stack Offset
6684   %}
6685 %}
6686 
6687 // Operands for expressing Control Flow
6688 // NOTE: Label is a predefined operand which should not be redefined in
6689 //       the AD file. It is generically handled within the ADLC.
6690 
6691 //----------Conditional Branch Operands----------------------------------------
6692 // Comparison Op  - This is the operation of the comparison, and is limited to
6693 //                  the following set of codes:
6694 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6695 //
6696 // Other attributes of the comparison, such as unsignedness, are specified
6697 // by the comparison instruction that sets a condition code flags register.
6698 // That result is represented by a flags operand whose subtype is appropriate
6699 // to the unsignedness (etc.) of the comparison.
6700 //
6701 // Later, the instruction which matches both the Comparison Op (a Bool) and
6702 // the flags (produced by the Cmp) specifies the coding of the comparison op
6703 // by matching a specific subtype of Bool operand below, such as cmpOpU.
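//
// As an illustrative sketch (not a rule from this file), a signed int
// compare-and-branch pairs a CmpI, which writes rFlagsReg, with a Bool
// matched as cmpOp, and might emit:
//
//   cmp  w0, w1      // CmpI sets the flags register
//   b.lt L_taken     // cmpOp supplies the "lt" condition code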
6704 
6705 // used for signed integral comparisons and fp comparisons
6706 
6707 operand cmpOp()
6708 %{
6709   match(Bool);
6710 
6711   format %{ "" %}
6712   interface(COND_INTER) %{
6713     equal(0x0, "eq");
6714     not_equal(0x1, "ne");
6715     less(0xb, "lt");
6716     greater_equal(0xa, "ge");
6717     less_equal(0xd, "le");
6718     greater(0xc, "gt");
6719     overflow(0x6, "vs");
6720     no_overflow(0x7, "vc");
6721   %}
6722 %}
6723 
6724 // used for unsigned integral comparisons
6725 
6726 operand cmpOpU()
6727 %{
6728   match(Bool);
6729 
6730   format %{ "" %}
6731   interface(COND_INTER) %{
6732     equal(0x0, "eq");
6733     not_equal(0x1, "ne");
6734     less(0x3, "lo");
6735     greater_equal(0x2, "hs");
6736     less_equal(0x9, "ls");
6737     greater(0x8, "hi");
6738     overflow(0x6, "vs");
6739     no_overflow(0x7, "vc");
6740   %}
6741 %}
6742 
6743 // Special operand allowing long args to int ops to be truncated for free
6744 
6745 operand iRegL2I(iRegL reg) %{
6746 
6747   op_cost(0);
6748 
6749   match(ConvL2I reg);
6750 
6751   format %{ "l2i($reg)" %}
6752 
6753   interface(REG_INTER)
6754 %}
6755 
6756 opclass vmem(indirect, indIndex, indOffI, indOffL);
6757 
6758 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions: the AD writer need not specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format. The
// classic case of this is memory operands.
6764 
// memory is used to define the read/write location for load/store
// instruction defs. We can turn a memory op into an Address.
6767 
6768 opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
6769                indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
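
// For example (illustrative, not exhaustive), a single load rule taking
// a memory operand can cover all of these address forms:
//
//   ldrw w0, [x1]              // indirect
//   ldrw w0, [x1, #12]         // indOffI
//   ldrw w0, [x1, x2, lsl #2]  // indIndexScaled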
6770 
6771 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. It allows the src to be either an iRegI or a (ConvL2I
// iRegL). In the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. If the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes, so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant, but it's not too costly.
6784 
6785 opclass iRegIorL2I(iRegI, iRegL2I);
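
// As an illustrative example, an ideal subtree such as
//
//   (AddI (ConvL2I src1) src2)
//
// can bind (ConvL2I src1) to iRegL2I, so the add is emitted as a single
// addw on the register holding the long value, with no separate movw.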
6786 
6787 //----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.
6790 pipeline %{
6791 
6792 attributes %{
6793   // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
6795   max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
6796   // ARM instructions come in 32-bit word units
6797   instruction_unit_size = 4;         // An instruction is 4 bytes long
6798   instruction_fetch_unit_size = 64;  // The processor fetches one line
6799   instruction_fetch_units = 1;       // of 64 bytes
6800 
6801   // List of nop instructions
6802   nops( MachNop );
6803 %}
6804 
// We don't use an actual pipeline model, so we don't care about
// resources or descriptions. We do use pipeline classes to introduce
// fixed latencies.
6808 
6809 //----------RESOURCES----------------------------------------------------------
6810 // Resources are the functional units available to the machine
6811 
6812 resources( INS0, INS1, INS01 = INS0 | INS1,
6813            ALU0, ALU1, ALU = ALU0 | ALU1,
6814            MAC,
6815            DIV,
6816            BRANCH,
6817            LDST,
6818            NEON_FP);
6819 
6820 //----------PIPELINE DESCRIPTION-----------------------------------------------
6821 // Pipeline Description specifies the stages in the machine's pipeline
6822 
6823 pipe_desc(ISS, EX1, EX2, WR);
6824 
6825 //----------PIPELINE CLASSES---------------------------------------------------
6826 // Pipeline Classes describe the stages in which input and output are
6827 // referenced by the hardware pipeline.
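//
// Each pipe_class below is referenced from instruction definitions via
// ins_pipe. As a sketch, a hypothetical ALU rule would declare
//
//   ins_pipe(ialu_reg_reg); // operands read in EX1, result written in EX2
//
// and the scheduler uses the named class to model those stage timings.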
6828 
6829 //------- Integer ALU operations --------------------------
6830 
6831 // Integer ALU reg-reg operation
6832 // Operands needed in EX1, result generated in EX2
6833 // Eg.  ADD     x0, x1, x2
6834 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6835 %{
6836   single_instruction;
6837   dst    : EX2(write);
6838   src1   : EX1(read);
6839   src2   : EX1(read);
6840   INS01  : ISS; // Dual issue as instruction 0 or 1
6841   ALU    : EX2;
6842 %}
6843 
6844 // Integer ALU reg-reg operation with constant shift
6845 // Shifted register must be available in LATE_ISS instead of EX1
6846 // Eg.  ADD     x0, x1, x2, LSL #2
6847 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
6848 %{
6849   single_instruction;
6850   dst    : EX2(write);
6851   src1   : EX1(read);
6852   src2   : ISS(read);
6853   INS01  : ISS;
6854   ALU    : EX2;
6855 %}
6856 
6857 // Integer ALU reg operation with constant shift
6858 // Eg.  LSL     x0, x1, #shift
6859 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
6860 %{
6861   single_instruction;
6862   dst    : EX2(write);
6863   src1   : ISS(read);
6864   INS01  : ISS;
6865   ALU    : EX2;
6866 %}
6867 
6868 // Integer ALU reg-reg operation with variable shift
6869 // Both operands must be available in LATE_ISS instead of EX1
6870 // Result is available in EX1 instead of EX2
6871 // Eg.  LSLV    x0, x1, x2
6872 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
6873 %{
6874   single_instruction;
6875   dst    : EX1(write);
6876   src1   : ISS(read);
6877   src2   : ISS(read);
6878   INS01  : ISS;
6879   ALU    : EX1;
6880 %}
6881 
6882 // Integer ALU reg-reg operation with extract
6883 // As for _vshift above, but result generated in EX2
6884 // Eg.  EXTR    x0, x1, x2, #N
6885 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
6886 %{
6887   single_instruction;
6888   dst    : EX2(write);
6889   src1   : ISS(read);
6890   src2   : ISS(read);
6891   INS1   : ISS; // Can only dual issue as Instruction 1
6892   ALU    : EX1;
6893 %}
6894 
6895 // Integer ALU reg operation
6896 // Eg.  NEG     x0, x1
6897 pipe_class ialu_reg(iRegI dst, iRegI src)
6898 %{
6899   single_instruction;
6900   dst    : EX2(write);
6901   src    : EX1(read);
6902   INS01  : ISS;
6903   ALU    : EX2;
6904 %}
6905 
// Integer ALU reg-immediate operation
6907 // Eg.  ADD     x0, x1, #N
6908 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
6909 %{
6910   single_instruction;
6911   dst    : EX2(write);
6912   src1   : EX1(read);
6913   INS01  : ISS;
6914   ALU    : EX2;
6915 %}
6916 
6917 // Integer ALU immediate operation (no source operands)
6918 // Eg.  MOV     x0, #N
6919 pipe_class ialu_imm(iRegI dst)
6920 %{
6921   single_instruction;
6922   dst    : EX1(write);
6923   INS01  : ISS;
6924   ALU    : EX1;
6925 %}
6926 
6927 //------- Compare operation -------------------------------
6928 
6929 // Compare reg-reg
6930 // Eg.  CMP     x0, x1
6931 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
6932 %{
6933   single_instruction;
6934 //  fixed_latency(16);
6935   cr     : EX2(write);
6936   op1    : EX1(read);
6937   op2    : EX1(read);
6938   INS01  : ISS;
6939   ALU    : EX2;
6940 %}
6941 
// Compare reg-immediate
6943 // Eg.  CMP     x0, #N
6944 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
6945 %{
6946   single_instruction;
6947 //  fixed_latency(16);
6948   cr     : EX2(write);
6949   op1    : EX1(read);
6950   INS01  : ISS;
6951   ALU    : EX2;
6952 %}
6953 
6954 //------- Conditional instructions ------------------------
6955 
6956 // Conditional no operands
6957 // Eg.  CSINC   x0, zr, zr, <cond>
6958 pipe_class icond_none(iRegI dst, rFlagsReg cr)
6959 %{
6960   single_instruction;
6961   cr     : EX1(read);
6962   dst    : EX2(write);
6963   INS01  : ISS;
6964   ALU    : EX2;
6965 %}
6966 
6967 // Conditional 2 operand
6968 // EG.  CSEL    X0, X1, X2, <cond>
6969 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
6970 %{
6971   single_instruction;
6972   cr     : EX1(read);
6973   src1   : EX1(read);
6974   src2   : EX1(read);
6975   dst    : EX2(write);
6976   INS01  : ISS;
6977   ALU    : EX2;
6978 %}
6979 
// Conditional 1 operand
// Eg.  CSINC   x0, x1, x1, <cond>
6982 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
6983 %{
6984   single_instruction;
6985   cr     : EX1(read);
6986   src    : EX1(read);
6987   dst    : EX2(write);
6988   INS01  : ISS;
6989   ALU    : EX2;
6990 %}
6991 
6992 //------- Multiply pipeline operations --------------------
6993 
6994 // Multiply reg-reg
6995 // Eg.  MUL     w0, w1, w2
6996 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6997 %{
6998   single_instruction;
6999   dst    : WR(write);
7000   src1   : ISS(read);
7001   src2   : ISS(read);
7002   INS01  : ISS;
7003   MAC    : WR;
7004 %}
7005 
7006 // Multiply accumulate
7007 // Eg.  MADD    w0, w1, w2, w3
7008 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
7009 %{
7010   single_instruction;
7011   dst    : WR(write);
7012   src1   : ISS(read);
7013   src2   : ISS(read);
7014   src3   : ISS(read);
7015   INS01  : ISS;
7016   MAC    : WR;
7017 %}
7018 
// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
7020 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
7021 %{
7022   single_instruction;
7023   fixed_latency(3); // Maximum latency for 64 bit mul
7024   dst    : WR(write);
7025   src1   : ISS(read);
7026   src2   : ISS(read);
7027   INS01  : ISS;
7028   MAC    : WR;
7029 %}
7030 
// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
7033 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
7034 %{
7035   single_instruction;
7036   fixed_latency(3); // Maximum latency for 64 bit mul
7037   dst    : WR(write);
7038   src1   : ISS(read);
7039   src2   : ISS(read);
7040   src3   : ISS(read);
7041   INS01  : ISS;
7042   MAC    : WR;
7043 %}
7044 
7045 //------- Divide pipeline operations --------------------
7046 
7047 // Eg.  SDIV    w0, w1, w2
7048 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
7049 %{
7050   single_instruction;
7051   fixed_latency(8); // Maximum latency for 32 bit divide
7052   dst    : WR(write);
7053   src1   : ISS(read);
7054   src2   : ISS(read);
7055   INS0   : ISS; // Can only dual issue as instruction 0
7056   DIV    : WR;
7057 %}
7058 
7059 // Eg.  SDIV    x0, x1, x2
7060 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
7061 %{
7062   single_instruction;
7063   fixed_latency(16); // Maximum latency for 64 bit divide
7064   dst    : WR(write);
7065   src1   : ISS(read);
7066   src2   : ISS(read);
7067   INS0   : ISS; // Can only dual issue as instruction 0
7068   DIV    : WR;
7069 %}
7070 
7071 //------- Load pipeline operations ------------------------
7072 
7073 // Load - prefetch
7074 // Eg.  PFRM    <mem>
7075 pipe_class iload_prefetch(memory mem)
7076 %{
7077   single_instruction;
7078   mem    : ISS(read);
7079   INS01  : ISS;
7080   LDST   : WR;
7081 %}
7082 
7083 // Load - reg, mem
7084 // Eg.  LDR     x0, <mem>
7085 pipe_class iload_reg_mem(iRegI dst, memory mem)
7086 %{
7087   single_instruction;
7088   dst    : WR(write);
7089   mem    : ISS(read);
7090   INS01  : ISS;
7091   LDST   : WR;
7092 %}
7093 
7094 // Load - reg, reg
7095 // Eg.  LDR     x0, [sp, x1]
7096 pipe_class iload_reg_reg(iRegI dst, iRegI src)
7097 %{
7098   single_instruction;
7099   dst    : WR(write);
7100   src    : ISS(read);
7101   INS01  : ISS;
7102   LDST   : WR;
7103 %}
7104 
7105 //------- Store pipeline operations -----------------------
7106 
7107 // Store - zr, mem
7108 // Eg.  STR     zr, <mem>
7109 pipe_class istore_mem(memory mem)
7110 %{
7111   single_instruction;
7112   mem    : ISS(read);
7113   INS01  : ISS;
7114   LDST   : WR;
7115 %}
7116 
7117 // Store - reg, mem
7118 // Eg.  STR     x0, <mem>
7119 pipe_class istore_reg_mem(iRegI src, memory mem)
7120 %{
7121   single_instruction;
7122   mem    : ISS(read);
7123   src    : EX2(read);
7124   INS01  : ISS;
7125   LDST   : WR;
7126 %}
7127 
7128 // Store - reg, reg
7129 // Eg. STR      x0, [sp, x1]
7130 pipe_class istore_reg_reg(iRegI dst, iRegI src)
7131 %{
7132   single_instruction;
7133   dst    : ISS(read);
7134   src    : EX2(read);
7135   INS01  : ISS;
7136   LDST   : WR;
7137 %}
7138 
//------- Branch pipeline operations ----------------------
7140 
7141 // Branch
7142 pipe_class pipe_branch()
7143 %{
7144   single_instruction;
7145   INS01  : ISS;
7146   BRANCH : EX1;
7147 %}
7148 
7149 // Conditional branch
7150 pipe_class pipe_branch_cond(rFlagsReg cr)
7151 %{
7152   single_instruction;
7153   cr     : EX1(read);
7154   INS01  : ISS;
7155   BRANCH : EX1;
7156 %}
7157 
7158 // Compare & Branch
7159 // EG.  CBZ/CBNZ
7160 pipe_class pipe_cmp_branch(iRegI op1)
7161 %{
7162   single_instruction;
7163   op1    : EX1(read);
7164   INS01  : ISS;
7165   BRANCH : EX1;
7166 %}
7167 
7168 //------- Synchronisation operations ----------------------
7169 
7170 // Any operation requiring serialization.
7171 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
7172 pipe_class pipe_serial()
7173 %{
7174   single_instruction;
7175   force_serialization;
7176   fixed_latency(16);
7177   INS01  : ISS(2); // Cannot dual issue with any other instruction
7178   LDST   : WR;
7179 %}
7180 
7181 // Generic big/slow expanded idiom - also serialized
7182 pipe_class pipe_slow()
7183 %{
7184   instruction_count(10);
7185   multiple_bundles;
7186   force_serialization;
7187   fixed_latency(16);
7188   INS01  : ISS(2); // Cannot dual issue with any other instruction
7189   LDST   : WR;
7190 %}
7191 
7192 // Empty pipeline class
7193 pipe_class pipe_class_empty()
7194 %{
7195   single_instruction;
7196   fixed_latency(0);
7197 %}
7198 
7199 // Default pipeline class.
7200 pipe_class pipe_class_default()
7201 %{
7202   single_instruction;
7203   fixed_latency(2);
7204 %}
7205 
7206 // Pipeline class for compares.
7207 pipe_class pipe_class_compare()
7208 %{
7209   single_instruction;
7210   fixed_latency(16);
7211 %}
7212 
7213 // Pipeline class for memory operations.
7214 pipe_class pipe_class_memory()
7215 %{
7216   single_instruction;
7217   fixed_latency(16);
7218 %}
7219 
7220 // Pipeline class for call.
7221 pipe_class pipe_class_call()
7222 %{
7223   single_instruction;
7224   fixed_latency(100);
7225 %}
7226 
7227 // Define the class for the Nop node.
7228 define %{
7229    MachNop = pipe_class_empty;
7230 %}
7231 
7232 %}
7233 //----------INSTRUCTIONS-------------------------------------------------------
7234 //
7235 // match      -- States which machine-independent subtree may be replaced
7236 //               by this instruction.
7237 // ins_cost   -- The estimated cost of this instruction is used by instruction
7238 //               selection to identify a minimum cost tree of machine
7239 //               instructions that matches a tree of machine-independent
7240 //               instructions.
7241 // format     -- A string providing the disassembly for this instruction.
7242 //               The value of an instruction's operand may be inserted
7243 //               by referring to it with a '$' prefix.
7244 // opcode     -- Three instruction opcodes may be provided.  These are referred
7245 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7247 //               indicate the type of machine instruction, while secondary
7248 //               and tertiary are often used for prefix options or addressing
7249 //               modes.
7250 // ins_encode -- A list of encode classes with parameters. The encode class
7251 //               name must have been defined in an 'enc_class' specification
7252 //               in the encode section of the architecture description.
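//
// As a schematic example only (in the style of the ALU rules later in
// this file), the attributes combine like this:
//
//   instruct addI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
//     match(Set dst (AddI src1 src2));          // replaces an ideal AddI subtree
//     ins_cost(INSN_COST);                      // one ALU instruction
//     format %{ "addw  $dst, $src1, $src2" %}   // disassembly string
//     ins_encode %{
//       __ addw(as_Register($dst$$reg),
//               as_Register($src1$$reg),
//               as_Register($src2$$reg));
//     %}
//     ins_pipe(ialu_reg_reg);
//   %}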
7253 
7254 // ============================================================================
7255 // Memory (Load/Store) Instructions
7256 
7257 // Load Instructions
7258 
7259 // Load Byte (8 bit signed)
7260 instruct loadB(iRegINoSp dst, memory mem)
7261 %{
7262   match(Set dst (LoadB mem));
7263   predicate(!needs_acquiring_load(n));
7264 
7265   ins_cost(4 * INSN_COST);
7266   format %{ "ldrsbw  $dst, $mem\t# byte" %}
7267 
7268   ins_encode(aarch64_enc_ldrsbw(dst, mem));
7269 
7270   ins_pipe(iload_reg_mem);
7271 %}
7272 
7273 // Load Byte (8 bit signed) into long
7274 instruct loadB2L(iRegLNoSp dst, memory mem)
7275 %{
7276   match(Set dst (ConvI2L (LoadB mem)));
7277   predicate(!needs_acquiring_load(n->in(1)));
7278 
7279   ins_cost(4 * INSN_COST);
7280   format %{ "ldrsb  $dst, $mem\t# byte" %}
7281 
7282   ins_encode(aarch64_enc_ldrsb(dst, mem));
7283 
7284   ins_pipe(iload_reg_mem);
7285 %}
7286 
7287 // Load Byte (8 bit unsigned)
7288 instruct loadUB(iRegINoSp dst, memory mem)
7289 %{
7290   match(Set dst (LoadUB mem));
7291   predicate(!needs_acquiring_load(n));
7292 
7293   ins_cost(4 * INSN_COST);
7294   format %{ "ldrbw  $dst, $mem\t# byte" %}
7295 
7296   ins_encode(aarch64_enc_ldrb(dst, mem));
7297 
7298   ins_pipe(iload_reg_mem);
7299 %}
7300 
7301 // Load Byte (8 bit unsigned) into long
7302 instruct loadUB2L(iRegLNoSp dst, memory mem)
7303 %{
7304   match(Set dst (ConvI2L (LoadUB mem)));
7305   predicate(!needs_acquiring_load(n->in(1)));
7306 
7307   ins_cost(4 * INSN_COST);
7308   format %{ "ldrb  $dst, $mem\t# byte" %}
7309 
7310   ins_encode(aarch64_enc_ldrb(dst, mem));
7311 
7312   ins_pipe(iload_reg_mem);
7313 %}
7314 
7315 // Load Short (16 bit signed)
7316 instruct loadS(iRegINoSp dst, memory mem)
7317 %{
7318   match(Set dst (LoadS mem));
7319   predicate(!needs_acquiring_load(n));
7320 
7321   ins_cost(4 * INSN_COST);
7322   format %{ "ldrshw  $dst, $mem\t# short" %}
7323 
7324   ins_encode(aarch64_enc_ldrshw(dst, mem));
7325 
7326   ins_pipe(iload_reg_mem);
7327 %}
7328 
7329 // Load Short (16 bit signed) into long
7330 instruct loadS2L(iRegLNoSp dst, memory mem)
7331 %{
7332   match(Set dst (ConvI2L (LoadS mem)));
7333   predicate(!needs_acquiring_load(n->in(1)));
7334 
7335   ins_cost(4 * INSN_COST);
7336   format %{ "ldrsh  $dst, $mem\t# short" %}
7337 
7338   ins_encode(aarch64_enc_ldrsh(dst, mem));
7339 
7340   ins_pipe(iload_reg_mem);
7341 %}
7342 
7343 // Load Char (16 bit unsigned)
7344 instruct loadUS(iRegINoSp dst, memory mem)
7345 %{
7346   match(Set dst (LoadUS mem));
7347   predicate(!needs_acquiring_load(n));
7348 
7349   ins_cost(4 * INSN_COST);
7350   format %{ "ldrh  $dst, $mem\t# short" %}
7351 
7352   ins_encode(aarch64_enc_ldrh(dst, mem));
7353 
7354   ins_pipe(iload_reg_mem);
7355 %}
7356 
7357 // Load Short/Char (16 bit unsigned) into long
7358 instruct loadUS2L(iRegLNoSp dst, memory mem)
7359 %{
7360   match(Set dst (ConvI2L (LoadUS mem)));
7361   predicate(!needs_acquiring_load(n->in(1)));
7362 
7363   ins_cost(4 * INSN_COST);
7364   format %{ "ldrh  $dst, $mem\t# short" %}
7365 
7366   ins_encode(aarch64_enc_ldrh(dst, mem));
7367 
7368   ins_pipe(iload_reg_mem);
7369 %}
7370 
7371 // Load Integer (32 bit signed)
7372 instruct loadI(iRegINoSp dst, memory mem)
7373 %{
7374   match(Set dst (LoadI mem));
7375   predicate(!needs_acquiring_load(n));
7376 
7377   ins_cost(4 * INSN_COST);
7378   format %{ "ldrw  $dst, $mem\t# int" %}
7379 
7380   ins_encode(aarch64_enc_ldrw(dst, mem));
7381 
7382   ins_pipe(iload_reg_mem);
7383 %}
7384 
7385 // Load Integer (32 bit signed) into long
7386 instruct loadI2L(iRegLNoSp dst, memory mem)
7387 %{
7388   match(Set dst (ConvI2L (LoadI mem)));
7389   predicate(!needs_acquiring_load(n->in(1)));
7390 
7391   ins_cost(4 * INSN_COST);
7392   format %{ "ldrsw  $dst, $mem\t# int" %}
7393 
7394   ins_encode(aarch64_enc_ldrsw(dst, mem));
7395 
7396   ins_pipe(iload_reg_mem);
7397 %}
7398 
7399 // Load Integer (32 bit unsigned) into long
7400 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
7401 %{
7402   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7403   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
7404 
7405   ins_cost(4 * INSN_COST);
7406   format %{ "ldrw  $dst, $mem\t# int" %}
7407 
7408   ins_encode(aarch64_enc_ldrw(dst, mem));
7409 
7410   ins_pipe(iload_reg_mem);
7411 %}
7412 
7413 // Load Long (64 bit signed)
7414 instruct loadL(iRegLNoSp dst, memory mem)
7415 %{
7416   match(Set dst (LoadL mem));
7417   predicate(!needs_acquiring_load(n));
7418 
7419   ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# long" %}
7421 
7422   ins_encode(aarch64_enc_ldr(dst, mem));
7423 
7424   ins_pipe(iload_reg_mem);
7425 %}
7426 
7427 // Load Range
7428 instruct loadRange(iRegINoSp dst, memory mem)
7429 %{
7430   match(Set dst (LoadRange mem));
7431 
7432   ins_cost(4 * INSN_COST);
7433   format %{ "ldrw  $dst, $mem\t# range" %}
7434 
7435   ins_encode(aarch64_enc_ldrw(dst, mem));
7436 
7437   ins_pipe(iload_reg_mem);
7438 %}
7439 
7440 // Load Pointer
7441 instruct loadP(iRegPNoSp dst, memory mem)
7442 %{
7443   match(Set dst (LoadP mem));
7444   predicate(!needs_acquiring_load(n));
7445 
7446   ins_cost(4 * INSN_COST);
7447   format %{ "ldr  $dst, $mem\t# ptr" %}
7448 
7449   ins_encode(aarch64_enc_ldr(dst, mem));
7450 
7451   ins_pipe(iload_reg_mem);
7452 %}
7453 
7454 // Load Compressed Pointer
7455 instruct loadN(iRegNNoSp dst, memory mem)
7456 %{
7457   match(Set dst (LoadN mem));
7458   predicate(!needs_acquiring_load(n));
7459 
7460   ins_cost(4 * INSN_COST);
7461   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
7462 
7463   ins_encode(aarch64_enc_ldrw(dst, mem));
7464 
7465   ins_pipe(iload_reg_mem);
7466 %}
7467 
7468 // Load Klass Pointer
7469 instruct loadKlass(iRegPNoSp dst, memory mem)
7470 %{
7471   match(Set dst (LoadKlass mem));
7472   predicate(!needs_acquiring_load(n));
7473 
7474   ins_cost(4 * INSN_COST);
7475   format %{ "ldr  $dst, $mem\t# class" %}
7476 
7477   ins_encode(aarch64_enc_ldr(dst, mem));
7478 
7479   ins_pipe(iload_reg_mem);
7480 %}
7481 
7482 // Load Narrow Klass Pointer
7483 instruct loadNKlass(iRegNNoSp dst, memory mem)
7484 %{
7485   match(Set dst (LoadNKlass mem));
7486   predicate(!needs_acquiring_load(n));
7487 
7488   ins_cost(4 * INSN_COST);
7489   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7490 
7491   ins_encode(aarch64_enc_ldrw(dst, mem));
7492 
7493   ins_pipe(iload_reg_mem);
7494 %}
7495 
7496 // Load Float
7497 instruct loadF(vRegF dst, memory mem)
7498 %{
7499   match(Set dst (LoadF mem));
7500   predicate(!needs_acquiring_load(n));
7501 
7502   ins_cost(4 * INSN_COST);
7503   format %{ "ldrs  $dst, $mem\t# float" %}
7504 
7505   ins_encode( aarch64_enc_ldrs(dst, mem) );
7506 
7507   ins_pipe(pipe_class_memory);
7508 %}
7509 
7510 // Load Double
7511 instruct loadD(vRegD dst, memory mem)
7512 %{
7513   match(Set dst (LoadD mem));
7514   predicate(!needs_acquiring_load(n));
7515 
7516   ins_cost(4 * INSN_COST);
7517   format %{ "ldrd  $dst, $mem\t# double" %}
7518 
7519   ins_encode( aarch64_enc_ldrd(dst, mem) );
7520 
7521   ins_pipe(pipe_class_memory);
7522 %}
7523 
7524 
7525 // Load Int Constant
7526 instruct loadConI(iRegINoSp dst, immI src)
7527 %{
7528   match(Set dst src);
7529 
7530   ins_cost(INSN_COST);
7531   format %{ "mov $dst, $src\t# int" %}
7532 
7533   ins_encode( aarch64_enc_movw_imm(dst, src) );
7534 
7535   ins_pipe(ialu_imm);
7536 %}
7537 
7538 // Load Long Constant
7539 instruct loadConL(iRegLNoSp dst, immL src)
7540 %{
7541   match(Set dst src);
7542 
7543   ins_cost(INSN_COST);
7544   format %{ "mov $dst, $src\t# long" %}
7545 
7546   ins_encode( aarch64_enc_mov_imm(dst, src) );
7547 
7548   ins_pipe(ialu_imm);
7549 %}
7550 
7551 // Load Pointer Constant
7552 
7553 instruct loadConP(iRegPNoSp dst, immP con)
7554 %{
7555   match(Set dst con);
7556 
7557   ins_cost(INSN_COST * 4);
7558   format %{
7559     "mov  $dst, $con\t# ptr\n\t"
7560   %}
7561 
7562   ins_encode(aarch64_enc_mov_p(dst, con));
7563 
7564   ins_pipe(ialu_imm);
7565 %}
7566 
7567 // Load Null Pointer Constant
7568 
7569 instruct loadConP0(iRegPNoSp dst, immP0 con)
7570 %{
7571   match(Set dst con);
7572 
7573   ins_cost(INSN_COST);
7574   format %{ "mov  $dst, $con\t# NULL ptr" %}
7575 
7576   ins_encode(aarch64_enc_mov_p0(dst, con));
7577 
7578   ins_pipe(ialu_imm);
7579 %}
7580 
7581 // Load Pointer Constant One
7582 
7583 instruct loadConP1(iRegPNoSp dst, immP_1 con)
7584 %{
7585   match(Set dst con);
7586 
7587   ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}
7589 
7590   ins_encode(aarch64_enc_mov_p1(dst, con));
7591 
7592   ins_pipe(ialu_imm);
7593 %}
7594 
7595 // Load Poll Page Constant
7596 
7597 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
7598 %{
7599   match(Set dst con);
7600 
7601   ins_cost(INSN_COST);
7602   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
7603 
7604   ins_encode(aarch64_enc_mov_poll_page(dst, con));
7605 
7606   ins_pipe(ialu_imm);
7607 %}
7608 
7609 // Load Byte Map Base Constant
7610 
7611 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
7612 %{
7613   match(Set dst con);
7614 
7615   ins_cost(INSN_COST);
7616   format %{ "adr  $dst, $con\t# Byte Map Base" %}
7617 
7618   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
7619 
7620   ins_pipe(ialu_imm);
7621 %}
7622 
7623 // Load Narrow Pointer Constant
7624 
7625 instruct loadConN(iRegNNoSp dst, immN con)
7626 %{
7627   match(Set dst con);
7628 
7629   ins_cost(INSN_COST * 4);
7630   format %{ "mov  $dst, $con\t# compressed ptr" %}
7631 
7632   ins_encode(aarch64_enc_mov_n(dst, con));
7633 
7634   ins_pipe(ialu_imm);
7635 %}
7636 
7637 // Load Narrow Null Pointer Constant
7638 
7639 instruct loadConN0(iRegNNoSp dst, immN0 con)
7640 %{
7641   match(Set dst con);
7642 
7643   ins_cost(INSN_COST);
7644   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
7645 
7646   ins_encode(aarch64_enc_mov_n0(dst, con));
7647 
7648   ins_pipe(ialu_imm);
7649 %}
7650 
7651 // Load Narrow Klass Constant
7652 
7653 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
7654 %{
7655   match(Set dst con);
7656 
7657   ins_cost(INSN_COST);
7658   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
7659 
7660   ins_encode(aarch64_enc_mov_nk(dst, con));
7661 
7662   ins_pipe(ialu_imm);
7663 %}
7664 
7665 // Load Packed Float Constant
7666 
7667 instruct loadConF_packed(vRegF dst, immFPacked con) %{
7668   match(Set dst con);
7669   ins_cost(INSN_COST * 4);
7670   format %{ "fmovs  $dst, $con"%}
7671   ins_encode %{
7672     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
7673   %}
7674 
7675   ins_pipe(pipe_class_default);
7676 %}
7677 
7678 // Load Float Constant
7679 
7680 instruct loadConF(vRegF dst, immF con) %{
7681   match(Set dst con);
7682 
7683   ins_cost(INSN_COST * 4);
7684 
7685   format %{
7686     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
7687   %}
7688 
7689   ins_encode %{
7690     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
7691   %}
7692 
7693   ins_pipe(pipe_class_default);
7694 %}
7695 
7696 // Load Packed Double Constant
7697 
7698 instruct loadConD_packed(vRegD dst, immDPacked con) %{
7699   match(Set dst con);
7700   ins_cost(INSN_COST);
7701   format %{ "fmovd  $dst, $con"%}
7702   ins_encode %{
7703     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
7704   %}
7705 
7706   ins_pipe(pipe_class_default);
7707 %}
7708 
7709 // Load Double Constant
7710 
7711 instruct loadConD(vRegD dst, immD con) %{
7712   match(Set dst con);
7713 
7714   ins_cost(INSN_COST * 5);
7715   format %{
7716     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
7717   %}
7718 
7719   ins_encode %{
7720     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
7721   %}
7722 
7723   ins_pipe(pipe_class_default);
7724 %}
7725 
7726 // Store Instructions
7727 
7728 // Store CMS card-mark Immediate
7729 instruct storeimmCM0(immI0 zero, memory mem)
7730 %{
7731   match(Set mem (StoreCM mem zero));
7732   predicate(unnecessary_storestore(n));
7733 
7734   ins_cost(INSN_COST);
7735   format %{ "strb zr, $mem\t# byte" %}
7736 
7737   ins_encode(aarch64_enc_strb0(mem));
7738 
7739   ins_pipe(istore_mem);
7740 %}
7741 
7742 // Store CMS card-mark Immediate with intervening StoreStore
7743 // needed when using CMS with no conditional card marking
7744 instruct storeimmCM0_ordered(immI0 zero, memory mem)
7745 %{
7746   match(Set mem (StoreCM mem zero));
7747 
7748   ins_cost(INSN_COST * 2);
7749   format %{ "dmb ishst"
7750       "\n\tstrb zr, $mem\t# byte" %}
7751 
7752   ins_encode(aarch64_enc_strb0_ordered(mem));
7753 
7754   ins_pipe(istore_mem);
7755 %}
7756 
7757 // Store Byte
7758 instruct storeB(iRegIorL2I src, memory mem)
7759 %{
7760   match(Set mem (StoreB mem src));
7761   predicate(!needs_releasing_store(n));
7762 
7763   ins_cost(INSN_COST);
7764   format %{ "strb  $src, $mem\t# byte" %}
7765 
7766   ins_encode(aarch64_enc_strb(src, mem));
7767 
7768   ins_pipe(istore_reg_mem);
7769 %}
7770 
7771 
7772 instruct storeimmB0(immI0 zero, memory mem)
7773 %{
7774   match(Set mem (StoreB mem zero));
7775   predicate(!needs_releasing_store(n));
7776 
7777   ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}
7779 
7780   ins_encode(aarch64_enc_strb0(mem));
7781 
7782   ins_pipe(istore_mem);
7783 %}
7784 
7785 // Store Char/Short
7786 instruct storeC(iRegIorL2I src, memory mem)
7787 %{
7788   match(Set mem (StoreC mem src));
7789   predicate(!needs_releasing_store(n));
7790 
7791   ins_cost(INSN_COST);
7792   format %{ "strh  $src, $mem\t# short" %}
7793 
7794   ins_encode(aarch64_enc_strh(src, mem));
7795 
7796   ins_pipe(istore_reg_mem);
7797 %}
7798 
7799 instruct storeimmC0(immI0 zero, memory mem)
7800 %{
7801   match(Set mem (StoreC mem zero));
7802   predicate(!needs_releasing_store(n));
7803 
7804   ins_cost(INSN_COST);
7805   format %{ "strh  zr, $mem\t# short" %}
7806 
7807   ins_encode(aarch64_enc_strh0(mem));
7808 
7809   ins_pipe(istore_mem);
7810 %}
7811 
7812 // Store Integer
7813 
7814 instruct storeI(iRegIorL2I src, memory mem)
7815 %{
7816   match(Set mem(StoreI mem src));
7817   predicate(!needs_releasing_store(n));
7818 
7819   ins_cost(INSN_COST);
7820   format %{ "strw  $src, $mem\t# int" %}
7821 
7822   ins_encode(aarch64_enc_strw(src, mem));
7823 
7824   ins_pipe(istore_reg_mem);
7825 %}
7826 
7827 instruct storeimmI0(immI0 zero, memory mem)
7828 %{
7829   match(Set mem(StoreI mem zero));
7830   predicate(!needs_releasing_store(n));
7831 
7832   ins_cost(INSN_COST);
7833   format %{ "strw  zr, $mem\t# int" %}
7834 
7835   ins_encode(aarch64_enc_strw0(mem));
7836 
7837   ins_pipe(istore_mem);
7838 %}
7839 
7840 // Store Long (64 bit signed)
7841 instruct storeL(iRegL src, memory mem)
7842 %{
7843   match(Set mem (StoreL mem src));
7844   predicate(!needs_releasing_store(n));
7845 
7846   ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# long" %}
7848 
7849   ins_encode(aarch64_enc_str(src, mem));
7850 
7851   ins_pipe(istore_reg_mem);
7852 %}
7853 
// Store Long Zero
7855 instruct storeimmL0(immL0 zero, memory mem)
7856 %{
7857   match(Set mem (StoreL mem zero));
7858   predicate(!needs_releasing_store(n));
7859 
7860   ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# long" %}
7862 
7863   ins_encode(aarch64_enc_str0(mem));
7864 
7865   ins_pipe(istore_mem);
7866 %}
7867 
7868 // Store Pointer
7869 instruct storeP(iRegP src, memory mem)
7870 %{
7871   match(Set mem (StoreP mem src));
7872   predicate(!needs_releasing_store(n));
7873 
7874   ins_cost(INSN_COST);
7875   format %{ "str  $src, $mem\t# ptr" %}
7876 
7877   ins_encode(aarch64_enc_str(src, mem));
7878 
7879   ins_pipe(istore_reg_mem);
7880 %}
7881 
// Store Null Pointer
7883 instruct storeimmP0(immP0 zero, memory mem)
7884 %{
7885   match(Set mem (StoreP mem zero));
7886   predicate(!needs_releasing_store(n));
7887 
7888   ins_cost(INSN_COST);
7889   format %{ "str zr, $mem\t# ptr" %}
7890 
7891   ins_encode(aarch64_enc_str0(mem));
7892 
7893   ins_pipe(istore_mem);
7894 %}
7895 
7896 // Store Compressed Pointer
7897 instruct storeN(iRegN src, memory mem)
7898 %{
7899   match(Set mem (StoreN mem src));
7900   predicate(!needs_releasing_store(n));
7901 
7902   ins_cost(INSN_COST);
7903   format %{ "strw  $src, $mem\t# compressed ptr" %}
7904 
7905   ins_encode(aarch64_enc_strw(src, mem));
7906 
7907   ins_pipe(istore_reg_mem);
7908 %}
7909 
7910 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
7911 %{
7912   match(Set mem (StoreN mem zero));
7913   predicate(Universe::narrow_oop_base() == NULL &&
7914             Universe::narrow_klass_base() == NULL &&
7915             (!needs_releasing_store(n)));
7916 
7917   ins_cost(INSN_COST);
7918   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
7919 
7920   ins_encode(aarch64_enc_strw(heapbase, mem));
7921 
7922   ins_pipe(istore_reg_mem);
7923 %}
7924 
7925 // Store Float
7926 instruct storeF(vRegF src, memory mem)
7927 %{
7928   match(Set mem (StoreF mem src));
7929   predicate(!needs_releasing_store(n));
7930 
7931   ins_cost(INSN_COST);
7932   format %{ "strs  $src, $mem\t# float" %}
7933 
7934   ins_encode( aarch64_enc_strs(src, mem) );
7935 
7936   ins_pipe(pipe_class_memory);
7937 %}
7938 
7939 // TODO
7940 // implement storeImmF0 and storeFImmPacked
7941 
7942 // Store Double
7943 instruct storeD(vRegD src, memory mem)
7944 %{
7945   match(Set mem (StoreD mem src));
7946   predicate(!needs_releasing_store(n));
7947 
7948   ins_cost(INSN_COST);
7949   format %{ "strd  $src, $mem\t# double" %}
7950 
7951   ins_encode( aarch64_enc_strd(src, mem) );
7952 
7953   ins_pipe(pipe_class_memory);
7954 %}
7955 
7956 // Store Compressed Klass Pointer
7957 instruct storeNKlass(iRegN src, memory mem)
7958 %{
7959   predicate(!needs_releasing_store(n));
7960   match(Set mem (StoreNKlass mem src));
7961 
7962   ins_cost(INSN_COST);
7963   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
7964 
7965   ins_encode(aarch64_enc_strw(src, mem));
7966 
7967   ins_pipe(istore_reg_mem);
7968 %}
7969 
7970 // TODO
7971 // implement storeImmD0 and storeDImmPacked
7972 
7973 // prefetch instructions
7974 // Must be safe to execute with invalid address (cannot fault).
7975 
7976 instruct prefetchalloc( memory mem ) %{
7977   match(PrefetchAllocation mem);
7978 
7979   ins_cost(INSN_COST);
7980   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
7981 
7982   ins_encode( aarch64_enc_prefetchw(mem) );
7983 
7984   ins_pipe(iload_prefetch);
7985 %}
7986 
7987 //  ---------------- volatile loads and stores ----------------
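//
// These rules use the load-acquire (ldar*) and store-release (stlr*)
// instruction forms rather than separate dmb barriers. As a rough
// sketch, a volatile long read and write compile to something like:
//
//   ldar x0, [x1]    // acquiring load
//   ...
//   stlr x0, [x1]    // releasing store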
7988 
7989 // Load Byte (8 bit signed)
7990 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
7991 %{
7992   match(Set dst (LoadB mem));
7993 
7994   ins_cost(VOLATILE_REF_COST);
7995   format %{ "ldarsb  $dst, $mem\t# byte" %}
7996 
7997   ins_encode(aarch64_enc_ldarsb(dst, mem));
7998 
7999   ins_pipe(pipe_serial);
8000 %}
8001 
8002 // Load Byte (8 bit signed) into long
8003 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8004 %{
8005   match(Set dst (ConvI2L (LoadB mem)));
8006 
8007   ins_cost(VOLATILE_REF_COST);
8008   format %{ "ldarsb  $dst, $mem\t# byte" %}
8009 
8010   ins_encode(aarch64_enc_ldarsb(dst, mem));
8011 
8012   ins_pipe(pipe_serial);
8013 %}
8014 
8015 // Load Byte (8 bit unsigned)
8016 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8017 %{
8018   match(Set dst (LoadUB mem));
8019 
8020   ins_cost(VOLATILE_REF_COST);
8021   format %{ "ldarb  $dst, $mem\t# byte" %}
8022 
8023   ins_encode(aarch64_enc_ldarb(dst, mem));
8024 
8025   ins_pipe(pipe_serial);
8026 %}
8027 
8028 // Load Byte (8 bit unsigned) into long
8029 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8030 %{
8031   match(Set dst (ConvI2L (LoadUB mem)));
8032 
8033   ins_cost(VOLATILE_REF_COST);
8034   format %{ "ldarb  $dst, $mem\t# byte" %}
8035 
8036   ins_encode(aarch64_enc_ldarb(dst, mem));
8037 
8038   ins_pipe(pipe_serial);
8039 %}
8040 
8041 // Load Short (16 bit signed)
8042 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8043 %{
8044   match(Set dst (LoadS mem));
8045 
8046   ins_cost(VOLATILE_REF_COST);
8047   format %{ "ldarshw  $dst, $mem\t# short" %}
8048 
8049   ins_encode(aarch64_enc_ldarshw(dst, mem));
8050 
8051   ins_pipe(pipe_serial);
8052 %}
8053 
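// Load Char (16 bit unsigned)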
8054 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8055 %{
8056   match(Set dst (LoadUS mem));
8057 
8058   ins_cost(VOLATILE_REF_COST);
8059   format %{ "ldarhw  $dst, $mem\t# short" %}
8060 
8061   ins_encode(aarch64_enc_ldarhw(dst, mem));
8062 
8063   ins_pipe(pipe_serial);
8064 %}
8065 
8066 // Load Short/Char (16 bit unsigned) into long
8067 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8068 %{
8069   match(Set dst (ConvI2L (LoadUS mem)));
8070 
8071   ins_cost(VOLATILE_REF_COST);
8072   format %{ "ldarh  $dst, $mem\t# short" %}
8073 
8074   ins_encode(aarch64_enc_ldarh(dst, mem));
8075 
8076   ins_pipe(pipe_serial);
8077 %}
8078 
// Load Short (16 bit signed) into long
8080 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8081 %{
8082   match(Set dst (ConvI2L (LoadS mem)));
8083 
8084   ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsh  $dst, $mem\t# short" %}
8086 
8087   ins_encode(aarch64_enc_ldarsh(dst, mem));
8088 
8089   ins_pipe(pipe_serial);
8090 %}
8091 
8092 // Load Integer (32 bit signed)
8093 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8094 %{
8095   match(Set dst (LoadI mem));
8096 
8097   ins_cost(VOLATILE_REF_COST);
8098   format %{ "ldarw  $dst, $mem\t# int" %}
8099 
8100   ins_encode(aarch64_enc_ldarw(dst, mem));
8101 
8102   ins_pipe(pipe_serial);
8103 %}
8104 
8105 // Load Integer (32 bit unsigned) into long
8106 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
8107 %{
8108   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
8109 
8110   ins_cost(VOLATILE_REF_COST);
8111   format %{ "ldarw  $dst, $mem\t# int" %}
8112 
8113   ins_encode(aarch64_enc_ldarw(dst, mem));
8114 
8115   ins_pipe(pipe_serial);
8116 %}
8117 
8118 // Load Long (64 bit signed)
8119 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8120 %{
8121   match(Set dst (LoadL mem));
8122 
8123   ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# long" %}
8125 
8126   ins_encode(aarch64_enc_ldar(dst, mem));
8127 
8128   ins_pipe(pipe_serial);
8129 %}
8130 
8131 // Load Pointer
8132 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
8133 %{
8134   match(Set dst (LoadP mem));
8135 
8136   ins_cost(VOLATILE_REF_COST);
8137   format %{ "ldar  $dst, $mem\t# ptr" %}
8138 
8139   ins_encode(aarch64_enc_ldar(dst, mem));
8140 
8141   ins_pipe(pipe_serial);
8142 %}
8143 
8144 // Load Compressed Pointer
8145 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
8146 %{
8147   match(Set dst (LoadN mem));
8148 
8149   ins_cost(VOLATILE_REF_COST);
8150   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
8151 
8152   ins_encode(aarch64_enc_ldarw(dst, mem));
8153 
8154   ins_pipe(pipe_serial);
8155 %}
8156 
8157 // Load Float
8158 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
8159 %{
8160   match(Set dst (LoadF mem));
8161 
8162   ins_cost(VOLATILE_REF_COST);
8163   format %{ "ldars  $dst, $mem\t# float" %}
8164 
8165   ins_encode( aarch64_enc_fldars(dst, mem) );
8166 
8167   ins_pipe(pipe_serial);
8168 %}
8169 
8170 // Load Double
8171 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
8172 %{
8173   match(Set dst (LoadD mem));
8174 
8175   ins_cost(VOLATILE_REF_COST);
8176   format %{ "ldard  $dst, $mem\t# double" %}
8177 
8178   ins_encode( aarch64_enc_fldard(dst, mem) );
8179 
8180   ins_pipe(pipe_serial);
8181 %}
8182 
8183 // Store Byte
8184 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8185 %{
8186   match(Set mem (StoreB mem src));
8187 
8188   ins_cost(VOLATILE_REF_COST);
8189   format %{ "stlrb  $src, $mem\t# byte" %}
8190 
8191   ins_encode(aarch64_enc_stlrb(src, mem));
8192 
8193   ins_pipe(pipe_class_memory);
8194 %}
8195 
8196 // Store Char/Short
8197 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8198 %{
8199   match(Set mem (StoreC mem src));
8200 
8201   ins_cost(VOLATILE_REF_COST);
8202   format %{ "stlrh  $src, $mem\t# short" %}
8203 
8204   ins_encode(aarch64_enc_stlrh(src, mem));
8205 
8206   ins_pipe(pipe_class_memory);
8207 %}
8208 
8209 // Store Integer
8210 
8211 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8212 %{
8213   match(Set mem(StoreI mem src));
8214 
8215   ins_cost(VOLATILE_REF_COST);
8216   format %{ "stlrw  $src, $mem\t# int" %}
8217 
8218   ins_encode(aarch64_enc_stlrw(src, mem));
8219 
8220   ins_pipe(pipe_class_memory);
8221 %}
8222 
8223 // Store Long (64 bit signed)
8224 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
8225 %{
8226   match(Set mem (StoreL mem src));
8227 
8228   ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# long" %}
8230 
8231   ins_encode(aarch64_enc_stlr(src, mem));
8232 
8233   ins_pipe(pipe_class_memory);
8234 %}
8235 
8236 // Store Pointer
8237 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
8238 %{
8239   match(Set mem (StoreP mem src));
8240 
8241   ins_cost(VOLATILE_REF_COST);
8242   format %{ "stlr  $src, $mem\t# ptr" %}
8243 
8244   ins_encode(aarch64_enc_stlr(src, mem));
8245 
8246   ins_pipe(pipe_class_memory);
8247 %}
8248 
8249 // Store Compressed Pointer
8250 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
8251 %{
8252   match(Set mem (StoreN mem src));
8253 
8254   ins_cost(VOLATILE_REF_COST);
8255   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
8256 
8257   ins_encode(aarch64_enc_stlrw(src, mem));
8258 
8259   ins_pipe(pipe_class_memory);
8260 %}
8261 
8262 // Store Float
8263 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
8264 %{
8265   match(Set mem (StoreF mem src));
8266 
8267   ins_cost(VOLATILE_REF_COST);
8268   format %{ "stlrs  $src, $mem\t# float" %}
8269 
8270   ins_encode( aarch64_enc_fstlrs(src, mem) );
8271 
8272   ins_pipe(pipe_class_memory);
8273 %}
8274 
8275 // TODO
8276 // implement storeImmF0 and storeFImmPacked
8277 
8278 // Store Double
8279 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
8280 %{
8281   match(Set mem (StoreD mem src));
8282 
8283   ins_cost(VOLATILE_REF_COST);
8284   format %{ "stlrd  $src, $mem\t# double" %}
8285 
8286   ins_encode( aarch64_enc_fstlrd(src, mem) );
8287 
8288   ins_pipe(pipe_class_memory);
8289 %}
8290 
8291 //  ---------------- end of volatile loads and stores ----------------
8292 
8293 // ============================================================================
8294 // BSWAP Instructions
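//
// For example, revw applied to 0x11223344 yields 0x44332211: a full
// byte reversal of the 32-bit word.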
8295 
8296 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
8297   match(Set dst (ReverseBytesI src));
8298 
8299   ins_cost(INSN_COST);
8300   format %{ "revw  $dst, $src" %}
8301 
8302   ins_encode %{
8303     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
8304   %}
8305 
8306   ins_pipe(ialu_reg);
8307 %}
8308 
8309 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
8310   match(Set dst (ReverseBytesL src));
8311 
8312   ins_cost(INSN_COST);
8313   format %{ "rev  $dst, $src" %}
8314 
8315   ins_encode %{
8316     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
8317   %}
8318 
8319   ins_pipe(ialu_reg);
8320 %}
8321 
8322 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
8323   match(Set dst (ReverseBytesUS src));
8324 
8325   ins_cost(INSN_COST);
8326   format %{ "rev16w  $dst, $src" %}
8327 
8328   ins_encode %{
8329     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8330   %}
8331 
8332   ins_pipe(ialu_reg);
8333 %}
8334 
8335 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
8336   match(Set dst (ReverseBytesS src));
8337 
8338   ins_cost(INSN_COST);
8339   format %{ "rev16w  $dst, $src\n\t"
8340             "sbfmw $dst, $dst, #0, #15" %}
8341 
8342   ins_encode %{
8343     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8344     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
8345   %}
8346 
8347   ins_pipe(ialu_reg);
8348 %}
8349 
8350 // ============================================================================
8351 // Zero Count Instructions
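//
// CountTrailingZeros has no dedicated instruction; rbit reverses the
// bit order so trailing zeros become leading zeros, then clz counts
// them. E.g. for 0x00000008, rbitw gives 0x10000000 and clzw yields 3.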
8352 
8353 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8354   match(Set dst (CountLeadingZerosI src));
8355 
8356   ins_cost(INSN_COST);
8357   format %{ "clzw  $dst, $src" %}
8358   ins_encode %{
8359     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
8360   %}
8361 
8362   ins_pipe(ialu_reg);
8363 %}
8364 
8365 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
8366   match(Set dst (CountLeadingZerosL src));
8367 
8368   ins_cost(INSN_COST);
8369   format %{ "clz   $dst, $src" %}
8370   ins_encode %{
8371     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
8372   %}
8373 
8374   ins_pipe(ialu_reg);
8375 %}
8376 
8377 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8378   match(Set dst (CountTrailingZerosI src));
8379 
8380   ins_cost(INSN_COST * 2);
8381   format %{ "rbitw  $dst, $src\n\t"
8382             "clzw   $dst, $dst" %}
8383   ins_encode %{
8384     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
8385     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
8386   %}
8387 
8388   ins_pipe(ialu_reg);
8389 %}
8390 
8391 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
8392   match(Set dst (CountTrailingZerosL src));
8393 
8394   ins_cost(INSN_COST * 2);
8395   format %{ "rbit   $dst, $src\n\t"
8396             "clz    $dst, $dst" %}
8397   ins_encode %{
8398     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
8399     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
8400   %}
8401 
8402   ins_pipe(ialu_reg);
8403 %}
8404 
8405 //---------- Population Count Instructions -------------------------------------
8406 //
8407 
8408 instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
8409   predicate(UsePopCountInstruction);
8410   match(Set dst (PopCountI src));
8411   effect(TEMP tmp);
8412   ins_cost(INSN_COST * 13);
8413 
8414   format %{ "movw   $src, $src\n\t"
8415             "mov    $tmp, $src\t# vector (1D)\n\t"
8416             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8417             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8418             "mov    $dst, $tmp\t# vector (1D)" %}
8419   ins_encode %{
8420     __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
8421     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
8422     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8423     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8424     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8425   %}
8426 
8427   ins_pipe(pipe_class_default);
8428 %}
8429 
8430 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
8431   predicate(UsePopCountInstruction);
8432   match(Set dst (PopCountI (LoadI mem)));
8433   effect(TEMP tmp);
8434   ins_cost(INSN_COST * 13);
8435 
8436   format %{ "ldrs   $tmp, $mem\n\t"
8437             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8438             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8439             "mov    $dst, $tmp\t# vector (1D)" %}
8440   ins_encode %{
8441     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
8442     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
8443                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
8444     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8445     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8446     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8447   %}
8448 
8449   ins_pipe(pipe_class_default);
8450 %}
8451 
8452 // Note: Long.bitCount(long) returns an int.
8453 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
8454   predicate(UsePopCountInstruction);
8455   match(Set dst (PopCountL src));
8456   effect(TEMP tmp);
8457   ins_cost(INSN_COST * 13);
8458 
8459   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
8460             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8461             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8462             "mov    $dst, $tmp\t# vector (1D)" %}
8463   ins_encode %{
8464     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
8465     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8466     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8467     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8468   %}
8469 
8470   ins_pipe(pipe_class_default);
8471 %}
8472 
8473 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
8474   predicate(UsePopCountInstruction);
8475   match(Set dst (PopCountL (LoadL mem)));
8476   effect(TEMP tmp);
8477   ins_cost(INSN_COST * 13);
8478 
8479   format %{ "ldrd   $tmp, $mem\n\t"
8480             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8481             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8482             "mov    $dst, $tmp\t# vector (1D)" %}
8483   ins_encode %{
8484     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
8485     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
8486                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
8487     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8488     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8489     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8490   %}
8491 
8492   ins_pipe(pipe_class_default);
8493 %}
8494 
8495 // ============================================================================
8496 // MemBar Instruction
8497 
8498 instruct load_fence() %{
8499   match(LoadFence);
8500   ins_cost(VOLATILE_REF_COST);
8501 
8502   format %{ "load_fence" %}
8503 
8504   ins_encode %{
8505     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8506   %}
8507   ins_pipe(pipe_serial);
8508 %}
8509 
8510 instruct unnecessary_membar_acquire() %{
8511   predicate(unnecessary_acquire(n));
8512   match(MemBarAcquire);
8513   ins_cost(0);
8514 
8515   format %{ "membar_acquire (elided)" %}
8516 
8517   ins_encode %{
8518     __ block_comment("membar_acquire (elided)");
8519   %}
8520 
8521   ins_pipe(pipe_class_empty);
8522 %}
8523 
8524 instruct membar_acquire() %{
8525   match(MemBarAcquire);
8526   ins_cost(VOLATILE_REF_COST);
8527 
8528   format %{ "membar_acquire" %}
8529 
8530   ins_encode %{
8531     __ block_comment("membar_acquire");
8532     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8533   %}
8534 
8535   ins_pipe(pipe_serial);
8536 %}
8537 
8538 
8539 instruct membar_acquire_lock() %{
8540   match(MemBarAcquireLock);
8541   ins_cost(VOLATILE_REF_COST);
8542 
8543   format %{ "membar_acquire_lock" %}
8544 
8545   ins_encode %{
8546     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8547   %}
8548 
8549   ins_pipe(pipe_serial);
8550 %}
8551 
8552 instruct store_fence() %{
8553   match(StoreFence);
8554   ins_cost(VOLATILE_REF_COST);
8555 
8556   format %{ "store_fence" %}
8557 
8558   ins_encode %{
8559     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8560   %}
8561   ins_pipe(pipe_serial);
8562 %}
8563 
8564 instruct unnecessary_membar_release() %{
8565   predicate(unnecessary_release(n));
8566   match(MemBarRelease);
8567   ins_cost(0);
8568 
8569   format %{ "membar_release (elided)" %}
8570 
8571   ins_encode %{
8572     __ block_comment("membar_release (elided)");
8573   %}
8574   ins_pipe(pipe_serial);
8575 %}
8576 
8577 instruct membar_release() %{
8578   match(MemBarRelease);
8579   ins_cost(VOLATILE_REF_COST);
8580 
8581   format %{ "membar_release" %}
8582 
8583   ins_encode %{
8584     __ block_comment("membar_release");
8585     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8586   %}
8587   ins_pipe(pipe_serial);
8588 %}
8589 
8590 instruct membar_storestore() %{
8591   match(MemBarStoreStore);
8592   ins_cost(VOLATILE_REF_COST);
8593 
8594   format %{ "MEMBAR-store-store" %}
8595 
8596   ins_encode %{
8597     __ membar(Assembler::StoreStore);
8598   %}
8599   ins_pipe(pipe_serial);
8600 %}
8601 
8602 instruct membar_release_lock() %{
8603   match(MemBarReleaseLock);
8604   ins_cost(VOLATILE_REF_COST);
8605 
8606   format %{ "membar_release_lock" %}
8607 
8608   ins_encode %{
8609     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8610   %}
8611 
8612   ins_pipe(pipe_serial);
8613 %}
8614 
8615 instruct unnecessary_membar_volatile() %{
8616   predicate(unnecessary_volatile(n));
8617   match(MemBarVolatile);
8618   ins_cost(0);
8619 
8620   format %{ "membar_volatile (elided)" %}
8621 
8622   ins_encode %{
8623     __ block_comment("membar_volatile (elided)");
8624   %}
8625 
8626   ins_pipe(pipe_serial);
8627 %}
8628 
8629 instruct membar_volatile() %{
8630   match(MemBarVolatile);
8631   ins_cost(VOLATILE_REF_COST*100);
8632 
8633   format %{ "membar_volatile" %}
8634 
8635   ins_encode %{
8636     __ block_comment("membar_volatile");
8637     __ membar(Assembler::StoreLoad);
8638   %}
8639 
8640   ins_pipe(pipe_serial);
8641 %}
8642 
8643 // ============================================================================
8644 // Cast/Convert Instructions
8645 
8646 instruct castX2P(iRegPNoSp dst, iRegL src) %{
8647   match(Set dst (CastX2P src));
8648 
8649   ins_cost(INSN_COST);
8650   format %{ "mov $dst, $src\t# long -> ptr" %}
8651 
8652   ins_encode %{
8653     if ($dst$$reg != $src$$reg) {
8654       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8655     }
8656   %}
8657 
8658   ins_pipe(ialu_reg);
8659 %}
8660 
8661 instruct castP2X(iRegLNoSp dst, iRegP src) %{
8662   match(Set dst (CastP2X src));
8663 
8664   ins_cost(INSN_COST);
8665   format %{ "mov $dst, $src\t# ptr -> long" %}
8666 
8667   ins_encode %{
8668     if ($dst$$reg != $src$$reg) {
8669       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8670     }
8671   %}
8672 
8673   ins_pipe(ialu_reg);
8674 %}
8675 
8676 // Convert oop into int for vectors alignment masking
8677 instruct convP2I(iRegINoSp dst, iRegP src) %{
8678   match(Set dst (ConvL2I (CastP2X src)));
8679 
8680   ins_cost(INSN_COST);
8681   format %{ "movw $dst, $src\t# ptr -> int" %}
8682   ins_encode %{
8683     __ movw($dst$$Register, $src$$Register);
8684   %}
8685 
8686   ins_pipe(ialu_reg);
8687 %}
8688 
8689 // Convert compressed oop into int for vectors alignment masking
// in the case of 32-bit oops (heap < 4GB).
8691 instruct convN2I(iRegINoSp dst, iRegN src)
8692 %{
8693   predicate(Universe::narrow_oop_shift() == 0);
8694   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
8695 
8696   ins_cost(INSN_COST);
8697   format %{ "mov dst, $src\t# compressed ptr -> int" %}
8698   ins_encode %{
8699     __ movw($dst$$Register, $src$$Register);
8700   %}
8701 
8702   ins_pipe(ialu_reg);
8703 %}
8704 
8705 
8706 // Convert oop pointer into compressed form
8707 instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
8708   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
8709   match(Set dst (EncodeP src));
8710   effect(KILL cr);
8711   ins_cost(INSN_COST * 3);
8712   format %{ "encode_heap_oop $dst, $src" %}
8713   ins_encode %{
8714     Register s = $src$$Register;
8715     Register d = $dst$$Register;
8716     __ encode_heap_oop(d, s);
8717   %}
8718   ins_pipe(ialu_reg);
8719 %}
8720 
8721 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
8722   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
8723   match(Set dst (EncodeP src));
8724   ins_cost(INSN_COST * 3);
8725   format %{ "encode_heap_oop_not_null $dst, $src" %}
8726   ins_encode %{
8727     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
8728   %}
8729   ins_pipe(ialu_reg);
8730 %}
8731 
8732 instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
8733   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
8734             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
8735   match(Set dst (DecodeN src));
8736   ins_cost(INSN_COST * 3);
8737   format %{ "decode_heap_oop $dst, $src" %}
8738   ins_encode %{
8739     Register s = $src$$Register;
8740     Register d = $dst$$Register;
8741     __ decode_heap_oop(d, s);
8742   %}
8743   ins_pipe(ialu_reg);
8744 %}
8745 
8746 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
8747   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
8748             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
8749   match(Set dst (DecodeN src));
8750   ins_cost(INSN_COST * 3);
8751   format %{ "decode_heap_oop_not_null $dst, $src" %}
8752   ins_encode %{
8753     Register s = $src$$Register;
8754     Register d = $dst$$Register;
8755     __ decode_heap_oop_not_null(d, s);
8756   %}
8757   ins_pipe(ialu_reg);
8758 %}
8759 
8760 // n.b. AArch64 implementations of encode_klass_not_null and
8761 // decode_klass_not_null do not modify the flags register so, unlike
8762 // Intel, we don't kill CR as a side effect here
8763 
8764 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
8765   match(Set dst (EncodePKlass src));
8766 
8767   ins_cost(INSN_COST * 3);
8768   format %{ "encode_klass_not_null $dst,$src" %}
8769 
8770   ins_encode %{
8771     Register src_reg = as_Register($src$$reg);
8772     Register dst_reg = as_Register($dst$$reg);
8773     __ encode_klass_not_null(dst_reg, src_reg);
8774   %}
8775 
  ins_pipe(ialu_reg);
8777 %}
8778 
8779 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
8780   match(Set dst (DecodeNKlass src));
8781 
8782   ins_cost(INSN_COST * 3);
8783   format %{ "decode_klass_not_null $dst,$src" %}
8784 
8785   ins_encode %{
8786     Register src_reg = as_Register($src$$reg);
8787     Register dst_reg = as_Register($dst$$reg);
8788     if (dst_reg != src_reg) {
8789       __ decode_klass_not_null(dst_reg, src_reg);
8790     } else {
8791       __ decode_klass_not_null(dst_reg);
8792     }
8793   %}
8794 
  ins_pipe(ialu_reg);
8796 %}
8797 
8798 instruct checkCastPP(iRegPNoSp dst)
8799 %{
8800   match(Set dst (CheckCastPP dst));
8801 
8802   size(0);
8803   format %{ "# checkcastPP of $dst" %}
8804   ins_encode(/* empty encoding */);
8805   ins_pipe(pipe_class_empty);
8806 %}
8807 
8808 instruct castPP(iRegPNoSp dst)
8809 %{
8810   match(Set dst (CastPP dst));
8811 
8812   size(0);
8813   format %{ "# castPP of $dst" %}
8814   ins_encode(/* empty encoding */);
8815   ins_pipe(pipe_class_empty);
8816 %}
8817 
8818 instruct castII(iRegI dst)
8819 %{
8820   match(Set dst (CastII dst));
8821 
8822   size(0);
8823   format %{ "# castII of $dst" %}
8824   ins_encode(/* empty encoding */);
8825   ins_cost(0);
8826   ins_pipe(pipe_class_empty);
8827 %}
8828 
8829 // ============================================================================
8830 // Atomic operation instructions
8831 //
8832 // Intel and SPARC both implement Ideal Node LoadPLocked and
8833 // Store{PIL}Conditional instructions using a normal load for the
8834 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8835 //
// The ideal code appears only to use LoadPLocked/StorePConditional as
// a pair to lock object allocations from Eden space when not using
// TLABs.
8839 //
8840 // There does not appear to be a Load{IL}Locked Ideal Node and the
8841 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8842 // and to use StoreIConditional only for 32-bit and StoreLConditional
8843 // only for 64-bit.
8844 //
// We implement LoadPLocked and StorePConditional using, respectively,
// the AArch64 hw load-exclusive and store-conditional instructions,
// whereas we must implement each of Store{IL}Conditional using a CAS,
// which employs a pair of instructions comprising a load-exclusive
// followed by a store-conditional.
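//
// As a concrete illustration, a CAS such as
//
//   boolean ok = count.compareAndSet(expect, update); // AtomicInteger
//
// intrinsifies to CompareAndSwapI and is matched below as a cmpxchgw --
// a load-exclusive/store-exclusive retry loop -- followed by a cset to
// materialise the boolean result.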
8851 
8852 
8853 // Locked-load (linked load) of the current heap-top
8854 // used when updating the eden heap top
8855 // implemented using ldaxr on AArch64
8856 
8857 instruct loadPLocked(iRegPNoSp dst, indirect mem)
8858 %{
8859   match(Set dst (LoadPLocked mem));
8860 
8861   ins_cost(VOLATILE_REF_COST);
8862 
8863   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
8864 
8865   ins_encode(aarch64_enc_ldaxr(dst, mem));
8866 
8867   ins_pipe(pipe_serial);
8868 %}
8869 
8870 // Conditional-store of the updated heap-top.
8871 // Used during allocation of the shared heap.
8872 // Sets flag (EQ) on success.
8873 // implemented using stlxr on AArch64.
8874 
8875 instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
8876 %{
8877   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
8878 
8879   ins_cost(VOLATILE_REF_COST);
8880 
8881  // TODO
8882  // do we need to do a store-conditional release or can we just use a
8883  // plain store-conditional?
8884 
8885   format %{
8886     "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
8887     "cmpw rscratch1, zr\t# EQ on successful write"
8888   %}
8889 
8890   ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));
8891 
8892   ins_pipe(pipe_serial);
8893 %}
8894 
8895 // this has to be implemented as a CAS
8896 instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
8897 %{
8898   match(Set cr (StoreLConditional mem (Binary oldval newval)));
8899 
8900   ins_cost(VOLATILE_REF_COST);
8901 
8902   format %{
8903     "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
8904     "cmpw rscratch1, zr\t# EQ on successful write"
8905   %}
8906 
8907   ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval));
8908 
8909   ins_pipe(pipe_slow);
8910 %}
8911 
8912 // this has to be implemented as a CAS
8913 instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
8914 %{
8915   match(Set cr (StoreIConditional mem (Binary oldval newval)));
8916 
8917   ins_cost(VOLATILE_REF_COST);
8918 
8919   format %{
8920     "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
8921     "cmpw rscratch1, zr\t# EQ on successful write"
8922   %}
8923 
8924   ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval));
8925 
8926   ins_pipe(pipe_slow);
8927 %}
8928 
8929 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8930 // can't match them
8931 
8932 // standard CompareAndSwapX when we are using barriers
8933 
8934 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
8935 
8936   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
8937   ins_cost(2 * VOLATILE_REF_COST);
8938 
8939   effect(KILL cr);
8940 
8941  format %{
8942     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8943     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8944  %}
8945 
8946  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
8947             aarch64_enc_cset_eq(res));
8948 
8949   ins_pipe(pipe_slow);
8950 %}
8951 
8952 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8953 
8954   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8955   ins_cost(2 * VOLATILE_REF_COST);
8956 
8957   effect(KILL cr);
8958 
8959  format %{
8960     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8961     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8962  %}
8963 
8964  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8965             aarch64_enc_cset_eq(res));
8966 
8967   ins_pipe(pipe_slow);
8968 %}
8969 
8970 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8971 
8972   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8973   ins_cost(2 * VOLATILE_REF_COST);
8974 
8975   effect(KILL cr);
8976 
8977  format %{
8978     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
8979     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8980  %}
8981 
8982  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8983             aarch64_enc_cset_eq(res));
8984 
8985   ins_pipe(pipe_slow);
8986 %}
8987 
8988 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8989 
8990   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8991   ins_cost(2 * VOLATILE_REF_COST);
8992 
8993   effect(KILL cr);
8994 
8995  format %{
8996     "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
8997     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8998  %}
8999 
9000  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
9001             aarch64_enc_cset_eq(res));
9002 
9003   ins_pipe(pipe_slow);
9004 %}
9005 
9006 // alternative CompareAndSwapX when we are eliding barriers
9007 
9008 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
9009 
9010   predicate(needs_acquiring_load_exclusive(n));
9011   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
9012   ins_cost(VOLATILE_REF_COST);
9013 
9014   effect(KILL cr);
9015 
9016  format %{
9017     "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
9018     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9019  %}
9020 
9021  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
9022             aarch64_enc_cset_eq(res));
9023 
9024   ins_pipe(pipe_slow);
9025 %}
9026 
9027 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
9028 
9029   predicate(needs_acquiring_load_exclusive(n));
9030   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
9031   ins_cost(VOLATILE_REF_COST);
9032 
9033   effect(KILL cr);
9034 
9035  format %{
9036     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
9037     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9038  %}
9039 
9040  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
9041             aarch64_enc_cset_eq(res));
9042 
9043   ins_pipe(pipe_slow);
9044 %}
9045 
9046 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
9047 
9048   predicate(needs_acquiring_load_exclusive(n));
9049   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
9050   ins_cost(VOLATILE_REF_COST);
9051 
9052   effect(KILL cr);
9053 
9054  format %{
9055     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
9056     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9057  %}
9058 
9059  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
9060             aarch64_enc_cset_eq(res));
9061 
9062   ins_pipe(pipe_slow);
9063 %}
9064 
9065 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
9066 
9067   predicate(needs_acquiring_load_exclusive(n));
9068   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
9069   ins_cost(VOLATILE_REF_COST);
9070 
9071   effect(KILL cr);
9072 
9073  format %{
9074     "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
9075     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9076  %}
9077 
9078  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
9079             aarch64_enc_cset_eq(res));
9080 
9081   ins_pipe(pipe_slow);
9082 %}
9083 
9084 
9085 instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
9086   match(Set prev (GetAndSetI mem newv));
9087   format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
9088   ins_encode %{
9089     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
9090   %}
9091   ins_pipe(pipe_serial);
9092 %}
9093 
9094 instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
9095   match(Set prev (GetAndSetL mem newv));
9096   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
9097   ins_encode %{
9098     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
9099   %}
9100   ins_pipe(pipe_serial);
9101 %}
9102 
9103 instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
9104   match(Set prev (GetAndSetN mem newv));
9105   format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
9106   ins_encode %{
9107     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
9108   %}
9109   ins_pipe(pipe_serial);
9110 %}
9111 
9112 instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
9113   match(Set prev (GetAndSetP mem newv));
9114   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
9115   ins_encode %{
9116     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
9117   %}
9118   ins_pipe(pipe_serial);
9119 %}
9120 
9121 
9122 instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
9123   match(Set newval (GetAndAddL mem incr));
9124   ins_cost(INSN_COST * 10);
9125   format %{ "get_and_addL $newval, [$mem], $incr" %}
9126   ins_encode %{
9127     __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
9128   %}
9129   ins_pipe(pipe_serial);
9130 %}
9131 
9132 instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
9133   predicate(n->as_LoadStore()->result_not_used());
9134   match(Set dummy (GetAndAddL mem incr));
9135   ins_cost(INSN_COST * 9);
9136   format %{ "get_and_addL [$mem], $incr" %}
9137   ins_encode %{
9138     __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
9139   %}
9140   ins_pipe(pipe_serial);
9141 %}
9142 
9143 instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
9144   match(Set newval (GetAndAddL mem incr));
9145   ins_cost(INSN_COST * 10);
9146   format %{ "get_and_addL $newval, [$mem], $incr" %}
9147   ins_encode %{
9148     __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
9149   %}
9150   ins_pipe(pipe_serial);
9151 %}
9152 
9153 instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
9154   predicate(n->as_LoadStore()->result_not_used());
9155   match(Set dummy (GetAndAddL mem incr));
9156   ins_cost(INSN_COST * 9);
9157   format %{ "get_and_addL [$mem], $incr" %}
9158   ins_encode %{
9159     __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
9160   %}
9161   ins_pipe(pipe_serial);
9162 %}
9163 
9164 instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
9165   match(Set newval (GetAndAddI mem incr));
9166   ins_cost(INSN_COST * 10);
9167   format %{ "get_and_addI $newval, [$mem], $incr" %}
9168   ins_encode %{
9169     __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
9170   %}
9171   ins_pipe(pipe_serial);
9172 %}
9173 
9174 instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
9175   predicate(n->as_LoadStore()->result_not_used());
9176   match(Set dummy (GetAndAddI mem incr));
9177   ins_cost(INSN_COST * 9);
9178   format %{ "get_and_addI [$mem], $incr" %}
9179   ins_encode %{
9180     __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
9181   %}
9182   ins_pipe(pipe_serial);
9183 %}
9184 
9185 instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
9186   match(Set newval (GetAndAddI mem incr));
9187   ins_cost(INSN_COST * 10);
9188   format %{ "get_and_addI $newval, [$mem], $incr" %}
9189   ins_encode %{
9190     __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
9191   %}
9192   ins_pipe(pipe_serial);
9193 %}
9194 
9195 instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
9196   predicate(n->as_LoadStore()->result_not_used());
9197   match(Set dummy (GetAndAddI mem incr));
9198   ins_cost(INSN_COST * 9);
9199   format %{ "get_and_addI [$mem], $incr" %}
9200   ins_encode %{
9201     __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
9202   %}
9203   ins_pipe(pipe_serial);
9204 %}
9205 
9206 // Manifest a CmpL result in an integer register.
9207 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
9208 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
9209 %{
9210   match(Set dst (CmpL3 src1 src2));
9211   effect(KILL flags);
9212 
9213   ins_cost(INSN_COST * 6);
9214   format %{
9215       "cmp $src1, $src2"
9216       "csetw $dst, ne"
9217       "cnegw $dst, lt"
9218   %}
9219   // format %{ "CmpL3 $dst, $src1, $src2" %}
9220   ins_encode %{
9221     __ cmp($src1$$Register, $src2$$Register);
9222     __ csetw($dst$$Register, Assembler::NE);
9223     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
9224   %}
9225 
9226   ins_pipe(pipe_class_default);
9227 %}
9228 
9229 instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
9230 %{
9231   match(Set dst (CmpL3 src1 src2));
9232   effect(KILL flags);
9233 
9234   ins_cost(INSN_COST * 6);
9235   format %{
9236       "cmp $src1, $src2"
9237       "csetw $dst, ne"
9238       "cnegw $dst, lt"
9239   %}
9240   ins_encode %{
9241     int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
9243       __ adds(zr, $src1$$Register, -con);
9244     } else {
9245       __ subs(zr, $src1$$Register, con);
9246     }
9247     __ csetw($dst$$Register, Assembler::NE);
9248     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
9249   %}
9250 
9251   ins_pipe(pipe_class_default);
9252 %}
9253 
9254 // ============================================================================
9255 // Conditional Move Instructions
9256 
9257 // n.b. we have identical rules for both a signed compare op (cmpOp)
9258 // and an unsigned compare op (cmpOpU). it would be nice if we could
9259 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
9261 // opclass does not live up to the COND_INTER interface of its
9262 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9264 // which throws a ShouldNotHappen. So, we have to provide two flavours
9265 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9266 
9267 instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9268   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9269 
9270   ins_cost(INSN_COST * 2);
9271   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}
9272 
9273   ins_encode %{
9274     __ cselw(as_Register($dst$$reg),
9275              as_Register($src2$$reg),
9276              as_Register($src1$$reg),
9277              (Assembler::Condition)$cmp$$cmpcode);
9278   %}
9279 
9280   ins_pipe(icond_reg_reg);
9281 %}
9282 
9283 instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9284   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9285 
9286   ins_cost(INSN_COST * 2);
9287   format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}
9288 
9289   ins_encode %{
9290     __ cselw(as_Register($dst$$reg),
9291              as_Register($src2$$reg),
9292              as_Register($src1$$reg),
9293              (Assembler::Condition)$cmp$$cmpcode);
9294   %}
9295 
9296   ins_pipe(icond_reg_reg);
9297 %}
9298 
9299 // special cases where one arg is zero
9300 
9301 // n.b. this is selected in preference to the rule above because it
9302 // avoids loading constant 0 into a source register
9303 
9304 // TODO
9305 // we ought only to be able to cull one of these variants as the ideal
9306 // transforms ought always to order the zero consistently (to left/right?)
9307 
9308 instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
9309   match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
9310 
9311   ins_cost(INSN_COST * 2);
9312   format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}
9313 
9314   ins_encode %{
9315     __ cselw(as_Register($dst$$reg),
9316              as_Register($src$$reg),
9317              zr,
9318              (Assembler::Condition)$cmp$$cmpcode);
9319   %}
9320 
9321   ins_pipe(icond_reg);
9322 %}
9323 
9324 instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
9325   match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
9326 
9327   ins_cost(INSN_COST * 2);
9328   format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}
9329 
9330   ins_encode %{
9331     __ cselw(as_Register($dst$$reg),
9332              as_Register($src$$reg),
9333              zr,
9334              (Assembler::Condition)$cmp$$cmpcode);
9335   %}
9336 
9337   ins_pipe(icond_reg);
9338 %}
9339 
9340 instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
9341   match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
9342 
9343   ins_cost(INSN_COST * 2);
9344   format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}
9345 
9346   ins_encode %{
9347     __ cselw(as_Register($dst$$reg),
9348              zr,
9349              as_Register($src$$reg),
9350              (Assembler::Condition)$cmp$$cmpcode);
9351   %}
9352 
9353   ins_pipe(icond_reg);
9354 %}
9355 
9356 instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
9357   match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
9358 
9359   ins_cost(INSN_COST * 2);
9360   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}
9361 
9362   ins_encode %{
9363     __ cselw(as_Register($dst$$reg),
9364              zr,
9365              as_Register($src$$reg),
9366              (Assembler::Condition)$cmp$$cmpcode);
9367   %}
9368 
9369   ins_pipe(icond_reg);
9370 %}
9371 
9372 // special case for creating a boolean 0 or 1
9373 
9374 // n.b. this is selected in preference to the rule above because it
9375 // avoids loading constants 0 and 1 into a source register
9376 
9377 instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
9378   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
9379 
9380   ins_cost(INSN_COST * 2);
9381   format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}
9382 
9383   ins_encode %{
9384     // equivalently
9385     // cset(as_Register($dst$$reg),
9386     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9387     __ csincw(as_Register($dst$$reg),
9388              zr,
9389              zr,
9390              (Assembler::Condition)$cmp$$cmpcode);
9391   %}
9392 
9393   ins_pipe(icond_none);
9394 %}
9395 
9396 instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
9397   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
9398 
9399   ins_cost(INSN_COST * 2);
9400   format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}
9401 
9402   ins_encode %{
9403     // equivalently
9404     // cset(as_Register($dst$$reg),
9405     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9406     __ csincw(as_Register($dst$$reg),
9407              zr,
9408              zr,
9409              (Assembler::Condition)$cmp$$cmpcode);
9410   %}
9411 
9412   ins_pipe(icond_none);
9413 %}
9414 
9415 instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9416   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9417 
9418   ins_cost(INSN_COST * 2);
9419   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}
9420 
9421   ins_encode %{
9422     __ csel(as_Register($dst$$reg),
9423             as_Register($src2$$reg),
9424             as_Register($src1$$reg),
9425             (Assembler::Condition)$cmp$$cmpcode);
9426   %}
9427 
9428   ins_pipe(icond_reg_reg);
9429 %}
9430 
9431 instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9432   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9433 
9434   ins_cost(INSN_COST * 2);
9435   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}
9436 
9437   ins_encode %{
9438     __ csel(as_Register($dst$$reg),
9439             as_Register($src2$$reg),
9440             as_Register($src1$$reg),
9441             (Assembler::Condition)$cmp$$cmpcode);
9442   %}
9443 
9444   ins_pipe(icond_reg_reg);
9445 %}
9446 
9447 // special cases where one arg is zero
9448 
9449 instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9450   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9451 
9452   ins_cost(INSN_COST * 2);
9453   format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}
9454 
9455   ins_encode %{
9456     __ csel(as_Register($dst$$reg),
9457             zr,
9458             as_Register($src$$reg),
9459             (Assembler::Condition)$cmp$$cmpcode);
9460   %}
9461 
9462   ins_pipe(icond_reg);
9463 %}
9464 
9465 instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9466   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9467 
9468   ins_cost(INSN_COST * 2);
9469   format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}
9470 
9471   ins_encode %{
9472     __ csel(as_Register($dst$$reg),
9473             zr,
9474             as_Register($src$$reg),
9475             (Assembler::Condition)$cmp$$cmpcode);
9476   %}
9477 
9478   ins_pipe(icond_reg);
9479 %}
9480 
9481 instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9482   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9483 
9484   ins_cost(INSN_COST * 2);
9485   format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}
9486 
9487   ins_encode %{
9488     __ csel(as_Register($dst$$reg),
9489             as_Register($src$$reg),
9490             zr,
9491             (Assembler::Condition)$cmp$$cmpcode);
9492   %}
9493 
9494   ins_pipe(icond_reg);
9495 %}
9496 
9497 instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9498   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9499 
9500   ins_cost(INSN_COST * 2);
9501   format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}
9502 
9503   ins_encode %{
9504     __ csel(as_Register($dst$$reg),
9505             as_Register($src$$reg),
9506             zr,
9507             (Assembler::Condition)$cmp$$cmpcode);
9508   %}
9509 
9510   ins_pipe(icond_reg);
9511 %}
9512 
9513 instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9514   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9515 
9516   ins_cost(INSN_COST * 2);
9517   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}
9518 
9519   ins_encode %{
9520     __ csel(as_Register($dst$$reg),
9521             as_Register($src2$$reg),
9522             as_Register($src1$$reg),
9523             (Assembler::Condition)$cmp$$cmpcode);
9524   %}
9525 
9526   ins_pipe(icond_reg_reg);
9527 %}
9528 
9529 instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9530   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9531 
9532   ins_cost(INSN_COST * 2);
9533   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}
9534 
9535   ins_encode %{
9536     __ csel(as_Register($dst$$reg),
9537             as_Register($src2$$reg),
9538             as_Register($src1$$reg),
9539             (Assembler::Condition)$cmp$$cmpcode);
9540   %}
9541 
9542   ins_pipe(icond_reg_reg);
9543 %}
9544 
9545 // special cases where one arg is zero
9546 
9547 instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
9548   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
9549 
9550   ins_cost(INSN_COST * 2);
9551   format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}
9552 
9553   ins_encode %{
9554     __ csel(as_Register($dst$$reg),
9555             zr,
9556             as_Register($src$$reg),
9557             (Assembler::Condition)$cmp$$cmpcode);
9558   %}
9559 
9560   ins_pipe(icond_reg);
9561 %}
9562 
9563 instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
9564   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
9565 
9566   ins_cost(INSN_COST * 2);
9567   format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}
9568 
9569   ins_encode %{
9570     __ csel(as_Register($dst$$reg),
9571             zr,
9572             as_Register($src$$reg),
9573             (Assembler::Condition)$cmp$$cmpcode);
9574   %}
9575 
9576   ins_pipe(icond_reg);
9577 %}
9578 
9579 instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
9580   match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
9581 
9582   ins_cost(INSN_COST * 2);
9583   format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}
9584 
9585   ins_encode %{
9586     __ csel(as_Register($dst$$reg),
9587             as_Register($src$$reg),
9588             zr,
9589             (Assembler::Condition)$cmp$$cmpcode);
9590   %}
9591 
9592   ins_pipe(icond_reg);
9593 %}
9594 
9595 instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
9596   match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
9597 
9598   ins_cost(INSN_COST * 2);
9599   format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}
9600 
9601   ins_encode %{
9602     __ csel(as_Register($dst$$reg),
9603             as_Register($src$$reg),
9604             zr,
9605             (Assembler::Condition)$cmp$$cmpcode);
9606   %}
9607 
9608   ins_pipe(icond_reg);
9609 %}
9610 
9611 instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
9612   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
9613 
9614   ins_cost(INSN_COST * 2);
9615   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
9616 
9617   ins_encode %{
9618     __ cselw(as_Register($dst$$reg),
9619              as_Register($src2$$reg),
9620              as_Register($src1$$reg),
9621              (Assembler::Condition)$cmp$$cmpcode);
9622   %}
9623 
9624   ins_pipe(icond_reg_reg);
9625 %}
9626 
9627 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
9628   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
9629 
9630   ins_cost(INSN_COST * 2);
9631   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
9632 
9633   ins_encode %{
9634     __ cselw(as_Register($dst$$reg),
9635              as_Register($src2$$reg),
9636              as_Register($src1$$reg),
9637              (Assembler::Condition)$cmp$$cmpcode);
9638   %}
9639 
9640   ins_pipe(icond_reg_reg);
9641 %}
9642 
9643 // special cases where one arg is zero
9644 
9645 instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
9646   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
9647 
9648   ins_cost(INSN_COST * 2);
9649   format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}
9650 
9651   ins_encode %{
9652     __ cselw(as_Register($dst$$reg),
9653              zr,
9654              as_Register($src$$reg),
9655              (Assembler::Condition)$cmp$$cmpcode);
9656   %}
9657 
9658   ins_pipe(icond_reg);
9659 %}
9660 
9661 instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
9662   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
9663 
9664   ins_cost(INSN_COST * 2);
9665   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}
9666 
9667   ins_encode %{
9668     __ cselw(as_Register($dst$$reg),
9669              zr,
9670              as_Register($src$$reg),
9671              (Assembler::Condition)$cmp$$cmpcode);
9672   %}
9673 
9674   ins_pipe(icond_reg);
9675 %}
9676 
9677 instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
9678   match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
9679 
9680   ins_cost(INSN_COST * 2);
9681   format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}
9682 
9683   ins_encode %{
9684     __ cselw(as_Register($dst$$reg),
9685              as_Register($src$$reg),
9686              zr,
9687              (Assembler::Condition)$cmp$$cmpcode);
9688   %}
9689 
9690   ins_pipe(icond_reg);
9691 %}
9692 
9693 instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
9694   match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
9695 
9696   ins_cost(INSN_COST * 2);
9697   format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}
9698 
9699   ins_encode %{
9700     __ cselw(as_Register($dst$$reg),
9701              as_Register($src$$reg),
9702              zr,
9703              (Assembler::Condition)$cmp$$cmpcode);
9704   %}
9705 
9706   ins_pipe(icond_reg);
9707 %}
9708 
9709 instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
9710 %{
9711   match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
9712 
9713   ins_cost(INSN_COST * 3);
9714 
9715   format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
9716   ins_encode %{
9717     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9718     __ fcsels(as_FloatRegister($dst$$reg),
9719               as_FloatRegister($src2$$reg),
9720               as_FloatRegister($src1$$reg),
9721               cond);
9722   %}
9723 
9724   ins_pipe(pipe_class_default);
9725 %}
9726 
9727 instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
9728 %{
9729   match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
9730 
9731   ins_cost(INSN_COST * 3);
9732 
9733   format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
9734   ins_encode %{
9735     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9736     __ fcsels(as_FloatRegister($dst$$reg),
9737               as_FloatRegister($src2$$reg),
9738               as_FloatRegister($src1$$reg),
9739               cond);
9740   %}
9741 
9742   ins_pipe(pipe_class_default);
9743 %}
9744 
9745 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
9746 %{
9747   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
9748 
9749   ins_cost(INSN_COST * 3);
9750 
9751   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
9752   ins_encode %{
9753     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9754     __ fcseld(as_FloatRegister($dst$$reg),
9755               as_FloatRegister($src2$$reg),
9756               as_FloatRegister($src1$$reg),
9757               cond);
9758   %}
9759 
9760   ins_pipe(pipe_class_default);
9761 %}
9762 
9763 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
9764 %{
9765   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
9766 
9767   ins_cost(INSN_COST * 3);
9768 
9769   format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
9770   ins_encode %{
9771     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
9772     __ fcseld(as_FloatRegister($dst$$reg),
9773               as_FloatRegister($src2$$reg),
9774               as_FloatRegister($src1$$reg),
9775               cond);
9776   %}
9777 
9778   ins_pipe(pipe_class_default);
9779 %}
9780 
9781 // ============================================================================
9782 // Arithmetic Instructions
9783 //
9784 
9785 // Integer Addition
9786 
9787 // TODO
9788 // these currently employ operations which do not set CR and hence are
9789 // not flagged as killing CR but we would like to isolate the cases
9790 // where we want to set flags from those where we don't. need to work
9791 // out how to do that.
9792 
9793 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9794   match(Set dst (AddI src1 src2));
9795 
9796   ins_cost(INSN_COST);
9797   format %{ "addw  $dst, $src1, $src2" %}
9798 
9799   ins_encode %{
9800     __ addw(as_Register($dst$$reg),
9801             as_Register($src1$$reg),
9802             as_Register($src2$$reg));
9803   %}
9804 
9805   ins_pipe(ialu_reg_reg);
9806 %}
9807 
9808 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
9809   match(Set dst (AddI src1 src2));
9810 
9811   ins_cost(INSN_COST);
9812   format %{ "addw $dst, $src1, $src2" %}
9813 
9814   // use opcode to indicate that this is an add not a sub
9815   opcode(0x0);
9816 
9817   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9818 
9819   ins_pipe(ialu_reg_imm);
9820 %}
9821 
9822 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
9823   match(Set dst (AddI (ConvL2I src1) src2));
9824 
9825   ins_cost(INSN_COST);
9826   format %{ "addw $dst, $src1, $src2" %}
9827 
9828   // use opcode to indicate that this is an add not a sub
9829   opcode(0x0);
9830 
9831   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9832 
9833   ins_pipe(ialu_reg_imm);
9834 %}
9835 
9836 // Pointer Addition
9837 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
9838   match(Set dst (AddP src1 src2));
9839 
9840   ins_cost(INSN_COST);
9841   format %{ "add $dst, $src1, $src2\t# ptr" %}
9842 
9843   ins_encode %{
9844     __ add(as_Register($dst$$reg),
9845            as_Register($src1$$reg),
9846            as_Register($src2$$reg));
9847   %}
9848 
9849   ins_pipe(ialu_reg_reg);
9850 %}
9851 
9852 instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
9853   match(Set dst (AddP src1 (ConvI2L src2)));
9854 
9855   ins_cost(1.9 * INSN_COST);
9856   format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
9857 
9858   ins_encode %{
9859     __ add(as_Register($dst$$reg),
9860            as_Register($src1$$reg),
9861            as_Register($src2$$reg), ext::sxtw);
9862   %}
9863 
9864   ins_pipe(ialu_reg_reg);
9865 %}
9866 
9867 instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
9868   match(Set dst (AddP src1 (LShiftL src2 scale)));
9869 
9870   ins_cost(1.9 * INSN_COST);
9871   format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}
9872 
9873   ins_encode %{
9874     __ lea(as_Register($dst$$reg),
9875            Address(as_Register($src1$$reg), as_Register($src2$$reg),
9876                    Address::lsl($scale$$constant)));
9877   %}
9878 
9879   ins_pipe(ialu_reg_reg_shift);
9880 %}
9881 
9882 instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
9883   match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));
9884 
9885   ins_cost(1.9 * INSN_COST);
9886   format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}
9887 
9888   ins_encode %{
9889     __ lea(as_Register($dst$$reg),
9890            Address(as_Register($src1$$reg), as_Register($src2$$reg),
9891                    Address::sxtw($scale$$constant)));
9892   %}
9893 
9894   ins_pipe(ialu_reg_reg_shift);
9895 %}
9896 
9897 instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
9898   match(Set dst (LShiftL (ConvI2L src) scale));
9899 
9900   ins_cost(INSN_COST);
9901   format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}
9902 
9903   ins_encode %{
9904     __ sbfiz(as_Register($dst$$reg),
9905           as_Register($src$$reg),
9906           $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
9907   %}
9908 
9909   ins_pipe(ialu_reg_shift);
9910 %}
9911 
9912 // Pointer Immediate Addition
9913 // n.b. this needs to be more expensive than using an indirect memory
9914 // operand
9915 instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
9916   match(Set dst (AddP src1 src2));
9917 
9918   ins_cost(INSN_COST);
9919   format %{ "add $dst, $src1, $src2\t# ptr" %}
9920 
9921   // use opcode to indicate that this is an add not a sub
9922   opcode(0x0);
9923 
9924   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
9925 
9926   ins_pipe(ialu_reg_imm);
9927 %}
9928 
9929 // Long Addition
9930 instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
9931 
9932   match(Set dst (AddL src1 src2));
9933 
9934   ins_cost(INSN_COST);
9935   format %{ "add  $dst, $src1, $src2" %}
9936 
9937   ins_encode %{
9938     __ add(as_Register($dst$$reg),
9939            as_Register($src1$$reg),
9940            as_Register($src2$$reg));
9941   %}
9942 
9943   ins_pipe(ialu_reg_reg);
9944 %}
9945 
// Long Immediate Addition. No constant pool entries required.
9947 instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
9948   match(Set dst (AddL src1 src2));
9949 
9950   ins_cost(INSN_COST);
9951   format %{ "add $dst, $src1, $src2" %}
9952 
9953   // use opcode to indicate that this is an add not a sub
9954   opcode(0x0);
9955 
9956   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
9957 
9958   ins_pipe(ialu_reg_imm);
9959 %}
9960 
9961 // Integer Subtraction
9962 instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9963   match(Set dst (SubI src1 src2));
9964 
9965   ins_cost(INSN_COST);
9966   format %{ "subw  $dst, $src1, $src2" %}
9967 
9968   ins_encode %{
9969     __ subw(as_Register($dst$$reg),
9970             as_Register($src1$$reg),
9971             as_Register($src2$$reg));
9972   %}
9973 
9974   ins_pipe(ialu_reg_reg);
9975 %}
9976 
9977 // Immediate Subtraction
9978 instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
9979   match(Set dst (SubI src1 src2));
9980 
9981   ins_cost(INSN_COST);
9982   format %{ "subw $dst, $src1, $src2" %}
9983 
9984   // use opcode to indicate that this is a sub not an add
9985   opcode(0x1);
9986 
9987   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
9988 
9989   ins_pipe(ialu_reg_imm);
9990 %}
9991 
9992 // Long Subtraction
9993 instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
9994 
9995   match(Set dst (SubL src1 src2));
9996 
9997   ins_cost(INSN_COST);
9998   format %{ "sub  $dst, $src1, $src2" %}
9999 
10000   ins_encode %{
10001     __ sub(as_Register($dst$$reg),
10002            as_Register($src1$$reg),
10003            as_Register($src2$$reg));
10004   %}
10005 
10006   ins_pipe(ialu_reg_reg);
10007 %}
10008 
// Long Immediate Subtraction. No constant pool entries required.
10010 instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
10011   match(Set dst (SubL src1 src2));
10012 
10013   ins_cost(INSN_COST);
10014   format %{ "sub$dst, $src1, $src2" %}
10015 
10016   // use opcode to indicate that this is a sub not an add
10017   opcode(0x1);
10018 
10019   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
10020 
10021   ins_pipe(ialu_reg_imm);
10022 %}
10023 
10024 // Integer Negation (special case for sub)
10025 
10026 instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
10027   match(Set dst (SubI zero src));
10028 
10029   ins_cost(INSN_COST);
10030   format %{ "negw $dst, $src\t# int" %}
10031 
10032   ins_encode %{
10033     __ negw(as_Register($dst$$reg),
10034             as_Register($src$$reg));
10035   %}
10036 
10037   ins_pipe(ialu_reg);
10038 %}
10039 
10040 // Long Negation
10041 
10042 instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
10043   match(Set dst (SubL zero src));
10044 
10045   ins_cost(INSN_COST);
10046   format %{ "neg $dst, $src\t# long" %}
10047 
10048   ins_encode %{
10049     __ neg(as_Register($dst$$reg),
10050            as_Register($src$$reg));
10051   %}
10052 
10053   ins_pipe(ialu_reg);
10054 %}
10055 
10056 // Integer Multiply
10057 
10058 instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10059   match(Set dst (MulI src1 src2));
10060 
10061   ins_cost(INSN_COST * 3);
10062   format %{ "mulw  $dst, $src1, $src2" %}
10063 
10064   ins_encode %{
10065     __ mulw(as_Register($dst$$reg),
10066             as_Register($src1$$reg),
10067             as_Register($src2$$reg));
10068   %}
10069 
10070   ins_pipe(imul_reg_reg);
10071 %}
10072 
10073 instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10074   match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));
10075 
10076   ins_cost(INSN_COST * 3);
10077   format %{ "smull  $dst, $src1, $src2" %}
10078 
10079   ins_encode %{
10080     __ smull(as_Register($dst$$reg),
10081              as_Register($src1$$reg),
10082              as_Register($src2$$reg));
10083   %}
10084 
10085   ins_pipe(imul_reg_reg);
10086 %}
10087 
10088 // Long Multiply
10089 
10090 instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10091   match(Set dst (MulL src1 src2));
10092 
10093   ins_cost(INSN_COST * 5);
10094   format %{ "mul  $dst, $src1, $src2" %}
10095 
10096   ins_encode %{
10097     __ mul(as_Register($dst$$reg),
10098            as_Register($src1$$reg),
10099            as_Register($src2$$reg));
10100   %}
10101 
10102   ins_pipe(lmul_reg_reg);
10103 %}
10104 
10105 instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
10106 %{
10107   match(Set dst (MulHiL src1 src2));
10108 
10109   ins_cost(INSN_COST * 7);
10110   format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}
10111 
10112   ins_encode %{
10113     __ smulh(as_Register($dst$$reg),
10114              as_Register($src1$$reg),
10115              as_Register($src2$$reg));
10116   %}
10117 
10118   ins_pipe(lmul_reg_reg);
10119 %}
10120 
10121 // Combined Integer Multiply & Add/Sub
10122 
10123 instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
10124   match(Set dst (AddI src3 (MulI src1 src2)));
10125 
10126   ins_cost(INSN_COST * 3);
10127   format %{ "madd  $dst, $src1, $src2, $src3" %}
10128 
10129   ins_encode %{
10130     __ maddw(as_Register($dst$$reg),
10131              as_Register($src1$$reg),
10132              as_Register($src2$$reg),
10133              as_Register($src3$$reg));
10134   %}
10135 
10136   ins_pipe(imac_reg_reg);
10137 %}
10138 
10139 instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
10140   match(Set dst (SubI src3 (MulI src1 src2)));
10141 
10142   ins_cost(INSN_COST * 3);
10143   format %{ "msub  $dst, $src1, $src2, $src3" %}
10144 
10145   ins_encode %{
10146     __ msubw(as_Register($dst$$reg),
10147              as_Register($src1$$reg),
10148              as_Register($src2$$reg),
10149              as_Register($src3$$reg));
10150   %}
10151 
10152   ins_pipe(imac_reg_reg);
10153 %}
10154 
10155 // Combined Long Multiply & Add/Sub
10156 
10157 instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
10158   match(Set dst (AddL src3 (MulL src1 src2)));
10159 
10160   ins_cost(INSN_COST * 5);
10161   format %{ "madd  $dst, $src1, $src2, $src3" %}
10162 
10163   ins_encode %{
10164     __ madd(as_Register($dst$$reg),
10165             as_Register($src1$$reg),
10166             as_Register($src2$$reg),
10167             as_Register($src3$$reg));
10168   %}
10169 
10170   ins_pipe(lmac_reg_reg);
10171 %}
10172 
10173 instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
10174   match(Set dst (SubL src3 (MulL src1 src2)));
10175 
10176   ins_cost(INSN_COST * 5);
10177   format %{ "msub  $dst, $src1, $src2, $src3" %}
10178 
10179   ins_encode %{
10180     __ msub(as_Register($dst$$reg),
10181             as_Register($src1$$reg),
10182             as_Register($src2$$reg),
10183             as_Register($src3$$reg));
10184   %}
10185 
10186   ins_pipe(lmac_reg_reg);
10187 %}
10188 
10189 // Integer Divide
10190 
10191 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10192   match(Set dst (DivI src1 src2));
10193 
10194   ins_cost(INSN_COST * 19);
10195   format %{ "sdivw  $dst, $src1, $src2" %}
10196 
10197   ins_encode(aarch64_enc_divw(dst, src1, src2));
10198   ins_pipe(idiv_reg_reg);
10199 %}
10200 
10201 instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
10202   match(Set dst (URShiftI (RShiftI src1 div1) div2));
10203   ins_cost(INSN_COST);
10204   format %{ "lsrw $dst, $src1, $div1" %}
10205   ins_encode %{
10206     __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
10207   %}
10208   ins_pipe(ialu_reg_shift);
10209 %}
10210 
10211 instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
10212   match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
10213   ins_cost(INSN_COST);
10214   format %{ "addw $dst, $src, LSR $div1" %}
10215 
10216   ins_encode %{
10217     __ addw(as_Register($dst$$reg),
10218               as_Register($src$$reg),
10219               as_Register($src$$reg),
10220               Assembler::LSR, 31);
10221   %}
10222   ins_pipe(ialu_reg);
10223 %}
10224 
10225 // Long Divide
10226 
10227 instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10228   match(Set dst (DivL src1 src2));
10229 
10230   ins_cost(INSN_COST * 35);
10231   format %{ "sdiv   $dst, $src1, $src2" %}
10232 
10233   ins_encode(aarch64_enc_div(dst, src1, src2));
10234   ins_pipe(ldiv_reg_reg);
10235 %}
10236 
10237 instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
10238   match(Set dst (URShiftL (RShiftL src1 div1) div2));
10239   ins_cost(INSN_COST);
10240   format %{ "lsr $dst, $src1, $div1" %}
10241   ins_encode %{
10242     __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
10243   %}
10244   ins_pipe(ialu_reg_shift);
10245 %}
10246 
10247 instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
10248   match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
10249   ins_cost(INSN_COST);
10250   format %{ "add $dst, $src, $div1" %}
10251 
10252   ins_encode %{
10253     __ add(as_Register($dst$$reg),
10254               as_Register($src$$reg),
10255               as_Register($src$$reg),
10256               Assembler::LSR, 63);
10257   %}
10258   ins_pipe(ialu_reg);
10259 %}
10260 
10261 // Integer Remainder
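// AArch64 has no remainder instruction; the remainder is computed as
// src1 - (src1 / src2) * src2 using a divide followed by msub.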
10262 
10263 instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10264   match(Set dst (ModI src1 src2));
10265 
10266   ins_cost(INSN_COST * 22);
10267   format %{ "sdivw  rscratch1, $src1, $src2\n\t"
10268             "msubw($dst, rscratch1, $src2, $src1" %}
10269 
10270   ins_encode(aarch64_enc_modw(dst, src1, src2));
10271   ins_pipe(idiv_reg_reg);
10272 %}
10273 
10274 // Long Remainder
10275 
10276 instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10277   match(Set dst (ModL src1 src2));
10278 
10279   ins_cost(INSN_COST * 38);
10280   format %{ "sdiv   rscratch1, $src1, $src2\n"
10281             "msub($dst, rscratch1, $src2, $src1" %}
10282 
10283   ins_encode(aarch64_enc_mod(dst, src1, src2));
10284   ins_pipe(ldiv_reg_reg);
10285 %}
10286 
10287 // Integer Shifts
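// Java masks int shift counts to 5 bits. The register forms rely on the
// hardware doing the same: lslvw/lsrvw/asrvw take the shift register
// modulo 32. The immediate forms mask explicitly with & 0x1f in the
// encoder.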
10288 
10289 // Shift Left Register
10290 instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10291   match(Set dst (LShiftI src1 src2));
10292 
10293   ins_cost(INSN_COST * 2);
10294   format %{ "lslvw  $dst, $src1, $src2" %}
10295 
10296   ins_encode %{
10297     __ lslvw(as_Register($dst$$reg),
10298              as_Register($src1$$reg),
10299              as_Register($src2$$reg));
10300   %}
10301 
10302   ins_pipe(ialu_reg_reg_vshift);
10303 %}
10304 
10305 // Shift Left Immediate
10306 instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10307   match(Set dst (LShiftI src1 src2));
10308 
10309   ins_cost(INSN_COST);
10310   format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}
10311 
10312   ins_encode %{
10313     __ lslw(as_Register($dst$$reg),
10314             as_Register($src1$$reg),
10315             $src2$$constant & 0x1f);
10316   %}
10317 
10318   ins_pipe(ialu_reg_shift);
10319 %}
10320 
10321 // Shift Right Logical Register
10322 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10323   match(Set dst (URShiftI src1 src2));
10324 
10325   ins_cost(INSN_COST * 2);
10326   format %{ "lsrvw  $dst, $src1, $src2" %}
10327 
10328   ins_encode %{
10329     __ lsrvw(as_Register($dst$$reg),
10330              as_Register($src1$$reg),
10331              as_Register($src2$$reg));
10332   %}
10333 
10334   ins_pipe(ialu_reg_reg_vshift);
10335 %}
10336 
10337 // Shift Right Logical Immediate
10338 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10339   match(Set dst (URShiftI src1 src2));
10340 
10341   ins_cost(INSN_COST);
10342   format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}
10343 
10344   ins_encode %{
10345     __ lsrw(as_Register($dst$$reg),
10346             as_Register($src1$$reg),
10347             $src2$$constant & 0x1f);
10348   %}
10349 
10350   ins_pipe(ialu_reg_shift);
10351 %}
10352 
10353 // Shift Right Arithmetic Register
10354 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10355   match(Set dst (RShiftI src1 src2));
10356 
10357   ins_cost(INSN_COST * 2);
10358   format %{ "asrvw  $dst, $src1, $src2" %}
10359 
10360   ins_encode %{
10361     __ asrvw(as_Register($dst$$reg),
10362              as_Register($src1$$reg),
10363              as_Register($src2$$reg));
10364   %}
10365 
10366   ins_pipe(ialu_reg_reg_vshift);
10367 %}
10368 
10369 // Shift Right Arithmetic Immediate
10370 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10371   match(Set dst (RShiftI src1 src2));
10372 
10373   ins_cost(INSN_COST);
10374   format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
10375 
10376   ins_encode %{
10377     __ asrw(as_Register($dst$$reg),
10378             as_Register($src1$$reg),
10379             $src2$$constant & 0x1f);
10380   %}
10381 
10382   ins_pipe(ialu_reg_shift);
10383 %}
10384 
10385 // Combined Int Mask and Right Shift (using UBFM)
10386 // TODO
10387 
10388 // Long Shifts
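// As above, but 64-bit: the hardware masks register shift counts modulo
// 64, and immediate counts are masked with & 0x3f.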
10389 
10390 // Shift Left Register
10391 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10392   match(Set dst (LShiftL src1 src2));
10393 
10394   ins_cost(INSN_COST * 2);
10395   format %{ "lslv  $dst, $src1, $src2" %}
10396 
10397   ins_encode %{
10398     __ lslv(as_Register($dst$$reg),
10399             as_Register($src1$$reg),
10400             as_Register($src2$$reg));
10401   %}
10402 
10403   ins_pipe(ialu_reg_reg_vshift);
10404 %}
10405 
10406 // Shift Left Immediate
10407 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10408   match(Set dst (LShiftL src1 src2));
10409 
10410   ins_cost(INSN_COST);
10411   format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}
10412 
10413   ins_encode %{
10414     __ lsl(as_Register($dst$$reg),
10415             as_Register($src1$$reg),
10416             $src2$$constant & 0x3f);
10417   %}
10418 
10419   ins_pipe(ialu_reg_shift);
10420 %}
10421 
10422 // Shift Right Logical Register
10423 instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10424   match(Set dst (URShiftL src1 src2));
10425 
10426   ins_cost(INSN_COST * 2);
10427   format %{ "lsrv  $dst, $src1, $src2" %}
10428 
10429   ins_encode %{
10430     __ lsrv(as_Register($dst$$reg),
10431             as_Register($src1$$reg),
10432             as_Register($src2$$reg));
10433   %}
10434 
10435   ins_pipe(ialu_reg_reg_vshift);
10436 %}
10437 
10438 // Shift Right Logical Immediate
10439 instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10440   match(Set dst (URShiftL src1 src2));
10441 
10442   ins_cost(INSN_COST);
10443   format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}
10444 
10445   ins_encode %{
10446     __ lsr(as_Register($dst$$reg),
10447            as_Register($src1$$reg),
10448            $src2$$constant & 0x3f);
10449   %}
10450 
10451   ins_pipe(ialu_reg_shift);
10452 %}
10453 
10454 // A special-case pattern for card table stores.
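// The card-table write barrier computes a card index by shifting the
// store address, viewed as an integer, right by the card shift; matching
// the CastP2X (pointer-to-integer cast) here keeps that to a single lsr.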
10455 instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
10456   match(Set dst (URShiftL (CastP2X src1) src2));
10457 
10458   ins_cost(INSN_COST);
10459   format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}
10460 
10461   ins_encode %{
10462     __ lsr(as_Register($dst$$reg),
10463            as_Register($src1$$reg),
10464            $src2$$constant & 0x3f);
10465   %}
10466 
10467   ins_pipe(ialu_reg_shift);
10468 %}
10469 
10470 // Shift Right Arithmetic Register
10471 instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10472   match(Set dst (RShiftL src1 src2));
10473 
10474   ins_cost(INSN_COST * 2);
10475   format %{ "asrv  $dst, $src1, $src2" %}
10476 
10477   ins_encode %{
10478     __ asrv(as_Register($dst$$reg),
10479             as_Register($src1$$reg),
10480             as_Register($src2$$reg));
10481   %}
10482 
10483   ins_pipe(ialu_reg_reg_vshift);
10484 %}
10485 
10486 // Shift Right Arithmetic Immediate
10487 instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10488   match(Set dst (RShiftL src1 src2));
10489 
10490   ins_cost(INSN_COST);
10491   format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}
10492 
10493   ins_encode %{
10494     __ asr(as_Register($dst$$reg),
10495            as_Register($src1$$reg),
10496            $src2$$constant & 0x3f);
10497   %}
10498 
10499   ins_pipe(ialu_reg_shift);
10500 %}
10501 
10502 // BEGIN This section of the file is automatically generated. Do not edit --------------
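// The next two patterns implement bitwise NOT: eon (XOR with the
// complemented operand) against zr computes src ^ ~0, i.e. ~src.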
10503 
10504 instruct regL_not_reg(iRegLNoSp dst,
10505                          iRegL src1, immL_M1 m1,
10506                          rFlagsReg cr) %{
10507   match(Set dst (XorL src1 m1));
10508   ins_cost(INSN_COST);
10509   format %{ "eon  $dst, $src1, zr" %}
10510 
10511   ins_encode %{
10512     __ eon(as_Register($dst$$reg),
10513               as_Register($src1$$reg),
10514               zr,
10515               Assembler::LSL, 0);
10516   %}
10517 
10518   ins_pipe(ialu_reg);
10519 %}
10520 instruct regI_not_reg(iRegINoSp dst,
10521                          iRegIorL2I src1, immI_M1 m1,
10522                          rFlagsReg cr) %{
10523   match(Set dst (XorI src1 m1));
10524   ins_cost(INSN_COST);
10525   format %{ "eonw  $dst, $src1, zr" %}
10526 
10527   ins_encode %{
10528     __ eonw(as_Register($dst$$reg),
10529               as_Register($src1$$reg),
10530               zr,
10531               Assembler::LSL, 0);
10532   %}
10533 
10534   ins_pipe(ialu_reg);
10535 %}
10536 
10537 instruct AndI_reg_not_reg(iRegINoSp dst,
10538                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10539                          rFlagsReg cr) %{
10540   match(Set dst (AndI src1 (XorI src2 m1)));
10541   ins_cost(INSN_COST);
10542   format %{ "bicw  $dst, $src1, $src2" %}
10543 
10544   ins_encode %{
10545     __ bicw(as_Register($dst$$reg),
10546               as_Register($src1$$reg),
10547               as_Register($src2$$reg),
10548               Assembler::LSL, 0);
10549   %}
10550 
10551   ins_pipe(ialu_reg_reg);
10552 %}
10553 
10554 instruct AndL_reg_not_reg(iRegLNoSp dst,
10555                          iRegL src1, iRegL src2, immL_M1 m1,
10556                          rFlagsReg cr) %{
10557   match(Set dst (AndL src1 (XorL src2 m1)));
10558   ins_cost(INSN_COST);
10559   format %{ "bic  $dst, $src1, $src2" %}
10560 
10561   ins_encode %{
10562     __ bic(as_Register($dst$$reg),
10563               as_Register($src1$$reg),
10564               as_Register($src2$$reg),
10565               Assembler::LSL, 0);
10566   %}
10567 
10568   ins_pipe(ialu_reg_reg);
10569 %}
10570 
10571 instruct OrI_reg_not_reg(iRegINoSp dst,
10572                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10573                          rFlagsReg cr) %{
10574   match(Set dst (OrI src1 (XorI src2 m1)));
10575   ins_cost(INSN_COST);
10576   format %{ "ornw  $dst, $src1, $src2" %}
10577 
10578   ins_encode %{
10579     __ ornw(as_Register($dst$$reg),
10580               as_Register($src1$$reg),
10581               as_Register($src2$$reg),
10582               Assembler::LSL, 0);
10583   %}
10584 
10585   ins_pipe(ialu_reg_reg);
10586 %}
10587 
10588 instruct OrL_reg_not_reg(iRegLNoSp dst,
10589                          iRegL src1, iRegL src2, immL_M1 m1,
10590                          rFlagsReg cr) %{
10591   match(Set dst (OrL src1 (XorL src2 m1)));
10592   ins_cost(INSN_COST);
10593   format %{ "orn  $dst, $src1, $src2" %}
10594 
10595   ins_encode %{
10596     __ orn(as_Register($dst$$reg),
10597               as_Register($src1$$reg),
10598               as_Register($src2$$reg),
10599               Assembler::LSL, 0);
10600   %}
10601 
10602   ins_pipe(ialu_reg_reg);
10603 %}
10604 
10605 instruct XorI_reg_not_reg(iRegINoSp dst,
10606                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10607                          rFlagsReg cr) %{
10608   match(Set dst (XorI m1 (XorI src2 src1)));
10609   ins_cost(INSN_COST);
10610   format %{ "eonw  $dst, $src1, $src2" %}
10611 
10612   ins_encode %{
10613     __ eonw(as_Register($dst$$reg),
10614               as_Register($src1$$reg),
10615               as_Register($src2$$reg),
10616               Assembler::LSL, 0);
10617   %}
10618 
10619   ins_pipe(ialu_reg_reg);
10620 %}
10621 
10622 instruct XorL_reg_not_reg(iRegLNoSp dst,
10623                          iRegL src1, iRegL src2, immL_M1 m1,
10624                          rFlagsReg cr) %{
10625   match(Set dst (XorL m1 (XorL src2 src1)));
10626   ins_cost(INSN_COST);
10627   format %{ "eon  $dst, $src1, $src2" %}
10628 
10629   ins_encode %{
10630     __ eon(as_Register($dst$$reg),
10631               as_Register($src1$$reg),
10632               as_Register($src2$$reg),
10633               Assembler::LSL, 0);
10634   %}
10635 
10636   ins_pipe(ialu_reg_reg);
10637 %}
10638 
10639 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
10640                          iRegIorL2I src1, iRegIorL2I src2,
10641                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10642   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
10643   ins_cost(1.9 * INSN_COST);
10644   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
10645 
10646   ins_encode %{
10647     __ bicw(as_Register($dst$$reg),
10648               as_Register($src1$$reg),
10649               as_Register($src2$$reg),
10650               Assembler::LSR,
10651               $src3$$constant & 0x1f);
10652   %}
10653 
10654   ins_pipe(ialu_reg_reg_shift);
10655 %}
10656 
10657 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
10658                          iRegL src1, iRegL src2,
10659                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10660   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
10661   ins_cost(1.9 * INSN_COST);
10662   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
10663 
10664   ins_encode %{
10665     __ bic(as_Register($dst$$reg),
10666               as_Register($src1$$reg),
10667               as_Register($src2$$reg),
10668               Assembler::LSR,
10669               $src3$$constant & 0x3f);
10670   %}
10671 
10672   ins_pipe(ialu_reg_reg_shift);
10673 %}
10674 
10675 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
10676                          iRegIorL2I src1, iRegIorL2I src2,
10677                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10678   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
10679   ins_cost(1.9 * INSN_COST);
10680   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
10681 
10682   ins_encode %{
10683     __ bicw(as_Register($dst$$reg),
10684               as_Register($src1$$reg),
10685               as_Register($src2$$reg),
10686               Assembler::ASR,
10687               $src3$$constant & 0x1f);
10688   %}
10689 
10690   ins_pipe(ialu_reg_reg_shift);
10691 %}
10692 
10693 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
10694                          iRegL src1, iRegL src2,
10695                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10696   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
10697   ins_cost(1.9 * INSN_COST);
10698   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
10699 
10700   ins_encode %{
10701     __ bic(as_Register($dst$$reg),
10702               as_Register($src1$$reg),
10703               as_Register($src2$$reg),
10704               Assembler::ASR,
10705               $src3$$constant & 0x3f);
10706   %}
10707 
10708   ins_pipe(ialu_reg_reg_shift);
10709 %}
10710 
10711 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
10712                          iRegIorL2I src1, iRegIorL2I src2,
10713                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10714   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
10715   ins_cost(1.9 * INSN_COST);
10716   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
10717 
10718   ins_encode %{
10719     __ bicw(as_Register($dst$$reg),
10720               as_Register($src1$$reg),
10721               as_Register($src2$$reg),
10722               Assembler::LSL,
10723               $src3$$constant & 0x1f);
10724   %}
10725 
10726   ins_pipe(ialu_reg_reg_shift);
10727 %}
10728 
10729 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
10730                          iRegL src1, iRegL src2,
10731                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10732   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
10733   ins_cost(1.9 * INSN_COST);
10734   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
10735 
10736   ins_encode %{
10737     __ bic(as_Register($dst$$reg),
10738               as_Register($src1$$reg),
10739               as_Register($src2$$reg),
10740               Assembler::LSL,
10741               $src3$$constant & 0x3f);
10742   %}
10743 
10744   ins_pipe(ialu_reg_reg_shift);
10745 %}
10746 
10747 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
10748                          iRegIorL2I src1, iRegIorL2I src2,
10749                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10750   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
10751   ins_cost(1.9 * INSN_COST);
10752   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
10753 
10754   ins_encode %{
10755     __ eonw(as_Register($dst$$reg),
10756               as_Register($src1$$reg),
10757               as_Register($src2$$reg),
10758               Assembler::LSR,
10759               $src3$$constant & 0x1f);
10760   %}
10761 
10762   ins_pipe(ialu_reg_reg_shift);
10763 %}
10764 
10765 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
10766                          iRegL src1, iRegL src2,
10767                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10768   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
10769   ins_cost(1.9 * INSN_COST);
10770   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
10771 
10772   ins_encode %{
10773     __ eon(as_Register($dst$$reg),
10774               as_Register($src1$$reg),
10775               as_Register($src2$$reg),
10776               Assembler::LSR,
10777               $src3$$constant & 0x3f);
10778   %}
10779 
10780   ins_pipe(ialu_reg_reg_shift);
10781 %}
10782 
10783 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
10784                          iRegIorL2I src1, iRegIorL2I src2,
10785                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10786   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
10787   ins_cost(1.9 * INSN_COST);
10788   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
10789 
10790   ins_encode %{
10791     __ eonw(as_Register($dst$$reg),
10792               as_Register($src1$$reg),
10793               as_Register($src2$$reg),
10794               Assembler::ASR,
10795               $src3$$constant & 0x1f);
10796   %}
10797 
10798   ins_pipe(ialu_reg_reg_shift);
10799 %}
10800 
10801 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
10802                          iRegL src1, iRegL src2,
10803                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10804   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
10805   ins_cost(1.9 * INSN_COST);
10806   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
10807 
10808   ins_encode %{
10809     __ eon(as_Register($dst$$reg),
10810               as_Register($src1$$reg),
10811               as_Register($src2$$reg),
10812               Assembler::ASR,
10813               $src3$$constant & 0x3f);
10814   %}
10815 
10816   ins_pipe(ialu_reg_reg_shift);
10817 %}
10818 
10819 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
10820                          iRegIorL2I src1, iRegIorL2I src2,
10821                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10822   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
10823   ins_cost(1.9 * INSN_COST);
10824   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
10825 
10826   ins_encode %{
10827     __ eonw(as_Register($dst$$reg),
10828               as_Register($src1$$reg),
10829               as_Register($src2$$reg),
10830               Assembler::LSL,
10831               $src3$$constant & 0x1f);
10832   %}
10833 
10834   ins_pipe(ialu_reg_reg_shift);
10835 %}
10836 
10837 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
10838                          iRegL src1, iRegL src2,
10839                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10840   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
10841   ins_cost(1.9 * INSN_COST);
10842   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
10843 
10844   ins_encode %{
10845     __ eon(as_Register($dst$$reg),
10846               as_Register($src1$$reg),
10847               as_Register($src2$$reg),
10848               Assembler::LSL,
10849               $src3$$constant & 0x3f);
10850   %}
10851 
10852   ins_pipe(ialu_reg_reg_shift);
10853 %}
10854 
10855 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
10856                          iRegIorL2I src1, iRegIorL2I src2,
10857                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10858   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
10859   ins_cost(1.9 * INSN_COST);
10860   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
10861 
10862   ins_encode %{
10863     __ ornw(as_Register($dst$$reg),
10864               as_Register($src1$$reg),
10865               as_Register($src2$$reg),
10866               Assembler::LSR,
10867               $src3$$constant & 0x1f);
10868   %}
10869 
10870   ins_pipe(ialu_reg_reg_shift);
10871 %}
10872 
10873 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
10874                          iRegL src1, iRegL src2,
10875                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10876   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
10877   ins_cost(1.9 * INSN_COST);
10878   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
10879 
10880   ins_encode %{
10881     __ orn(as_Register($dst$$reg),
10882               as_Register($src1$$reg),
10883               as_Register($src2$$reg),
10884               Assembler::LSR,
10885               $src3$$constant & 0x3f);
10886   %}
10887 
10888   ins_pipe(ialu_reg_reg_shift);
10889 %}
10890 
10891 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
10892                          iRegIorL2I src1, iRegIorL2I src2,
10893                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10894   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
10895   ins_cost(1.9 * INSN_COST);
10896   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
10897 
10898   ins_encode %{
10899     __ ornw(as_Register($dst$$reg),
10900               as_Register($src1$$reg),
10901               as_Register($src2$$reg),
10902               Assembler::ASR,
10903               $src3$$constant & 0x1f);
10904   %}
10905 
10906   ins_pipe(ialu_reg_reg_shift);
10907 %}
10908 
10909 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
10910                          iRegL src1, iRegL src2,
10911                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10912   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
10913   ins_cost(1.9 * INSN_COST);
10914   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
10915 
10916   ins_encode %{
10917     __ orn(as_Register($dst$$reg),
10918               as_Register($src1$$reg),
10919               as_Register($src2$$reg),
10920               Assembler::ASR,
10921               $src3$$constant & 0x3f);
10922   %}
10923 
10924   ins_pipe(ialu_reg_reg_shift);
10925 %}
10926 
10927 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
10928                          iRegIorL2I src1, iRegIorL2I src2,
10929                          immI src3, immI_M1 src4, rFlagsReg cr) %{
10930   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
10931   ins_cost(1.9 * INSN_COST);
10932   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
10933 
10934   ins_encode %{
10935     __ ornw(as_Register($dst$$reg),
10936               as_Register($src1$$reg),
10937               as_Register($src2$$reg),
10938               Assembler::LSL,
10939               $src3$$constant & 0x1f);
10940   %}
10941 
10942   ins_pipe(ialu_reg_reg_shift);
10943 %}
10944 
10945 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
10946                          iRegL src1, iRegL src2,
10947                          immI src3, immL_M1 src4, rFlagsReg cr) %{
10948   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
10949   ins_cost(1.9 * INSN_COST);
10950   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
10951 
10952   ins_encode %{
10953     __ orn(as_Register($dst$$reg),
10954               as_Register($src1$$reg),
10955               as_Register($src2$$reg),
10956               Assembler::LSL,
10957               $src3$$constant & 0x3f);
10958   %}
10959 
10960   ins_pipe(ialu_reg_reg_shift);
10961 %}
10962 
10963 instruct AndI_reg_URShift_reg(iRegINoSp dst,
10964                          iRegIorL2I src1, iRegIorL2I src2,
10965                          immI src3, rFlagsReg cr) %{
10966   match(Set dst (AndI src1 (URShiftI src2 src3)));
10967 
10968   ins_cost(1.9 * INSN_COST);
10969   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
10970 
10971   ins_encode %{
10972     __ andw(as_Register($dst$$reg),
10973               as_Register($src1$$reg),
10974               as_Register($src2$$reg),
10975               Assembler::LSR,
10976               $src3$$constant & 0x1f);
10977   %}
10978 
10979   ins_pipe(ialu_reg_reg_shift);
10980 %}
10981 
10982 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
10983                          iRegL src1, iRegL src2,
10984                          immI src3, rFlagsReg cr) %{
10985   match(Set dst (AndL src1 (URShiftL src2 src3)));
10986 
10987   ins_cost(1.9 * INSN_COST);
10988   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
10989 
10990   ins_encode %{
10991     __ andr(as_Register($dst$$reg),
10992               as_Register($src1$$reg),
10993               as_Register($src2$$reg),
10994               Assembler::LSR,
10995               $src3$$constant & 0x3f);
10996   %}
10997 
10998   ins_pipe(ialu_reg_reg_shift);
10999 %}
11000 
11001 instruct AndI_reg_RShift_reg(iRegINoSp dst,
11002                          iRegIorL2I src1, iRegIorL2I src2,
11003                          immI src3, rFlagsReg cr) %{
11004   match(Set dst (AndI src1 (RShiftI src2 src3)));
11005 
11006   ins_cost(1.9 * INSN_COST);
11007   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
11008 
11009   ins_encode %{
11010     __ andw(as_Register($dst$$reg),
11011               as_Register($src1$$reg),
11012               as_Register($src2$$reg),
11013               Assembler::ASR,
11014               $src3$$constant & 0x1f);
11015   %}
11016 
11017   ins_pipe(ialu_reg_reg_shift);
11018 %}
11019 
11020 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
11021                          iRegL src1, iRegL src2,
11022                          immI src3, rFlagsReg cr) %{
11023   match(Set dst (AndL src1 (RShiftL src2 src3)));
11024 
11025   ins_cost(1.9 * INSN_COST);
11026   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
11027 
11028   ins_encode %{
11029     __ andr(as_Register($dst$$reg),
11030               as_Register($src1$$reg),
11031               as_Register($src2$$reg),
11032               Assembler::ASR,
11033               $src3$$constant & 0x3f);
11034   %}
11035 
11036   ins_pipe(ialu_reg_reg_shift);
11037 %}
11038 
11039 instruct AndI_reg_LShift_reg(iRegINoSp dst,
11040                          iRegIorL2I src1, iRegIorL2I src2,
11041                          immI src3, rFlagsReg cr) %{
11042   match(Set dst (AndI src1 (LShiftI src2 src3)));
11043 
11044   ins_cost(1.9 * INSN_COST);
11045   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
11046 
11047   ins_encode %{
11048     __ andw(as_Register($dst$$reg),
11049               as_Register($src1$$reg),
11050               as_Register($src2$$reg),
11051               Assembler::LSL,
11052               $src3$$constant & 0x1f);
11053   %}
11054 
11055   ins_pipe(ialu_reg_reg_shift);
11056 %}
11057 
11058 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
11059                          iRegL src1, iRegL src2,
11060                          immI src3, rFlagsReg cr) %{
11061   match(Set dst (AndL src1 (LShiftL src2 src3)));
11062 
11063   ins_cost(1.9 * INSN_COST);
11064   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
11065 
11066   ins_encode %{
11067     __ andr(as_Register($dst$$reg),
11068               as_Register($src1$$reg),
11069               as_Register($src2$$reg),
11070               Assembler::LSL,
11071               $src3$$constant & 0x3f);
11072   %}
11073 
11074   ins_pipe(ialu_reg_reg_shift);
11075 %}
11076 
11077 instruct XorI_reg_URShift_reg(iRegINoSp dst,
11078                          iRegIorL2I src1, iRegIorL2I src2,
11079                          immI src3, rFlagsReg cr) %{
11080   match(Set dst (XorI src1 (URShiftI src2 src3)));
11081 
11082   ins_cost(1.9 * INSN_COST);
11083   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
11084 
11085   ins_encode %{
11086     __ eorw(as_Register($dst$$reg),
11087               as_Register($src1$$reg),
11088               as_Register($src2$$reg),
11089               Assembler::LSR,
11090               $src3$$constant & 0x1f);
11091   %}
11092 
11093   ins_pipe(ialu_reg_reg_shift);
11094 %}
11095 
11096 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
11097                          iRegL src1, iRegL src2,
11098                          immI src3, rFlagsReg cr) %{
11099   match(Set dst (XorL src1 (URShiftL src2 src3)));
11100 
11101   ins_cost(1.9 * INSN_COST);
11102   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
11103 
11104   ins_encode %{
11105     __ eor(as_Register($dst$$reg),
11106               as_Register($src1$$reg),
11107               as_Register($src2$$reg),
11108               Assembler::LSR,
11109               $src3$$constant & 0x3f);
11110   %}
11111 
11112   ins_pipe(ialu_reg_reg_shift);
11113 %}
11114 
11115 instruct XorI_reg_RShift_reg(iRegINoSp dst,
11116                          iRegIorL2I src1, iRegIorL2I src2,
11117                          immI src3, rFlagsReg cr) %{
11118   match(Set dst (XorI src1 (RShiftI src2 src3)));
11119 
11120   ins_cost(1.9 * INSN_COST);
11121   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
11122 
11123   ins_encode %{
11124     __ eorw(as_Register($dst$$reg),
11125               as_Register($src1$$reg),
11126               as_Register($src2$$reg),
11127               Assembler::ASR,
11128               $src3$$constant & 0x1f);
11129   %}
11130 
11131   ins_pipe(ialu_reg_reg_shift);
11132 %}
11133 
11134 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
11135                          iRegL src1, iRegL src2,
11136                          immI src3, rFlagsReg cr) %{
11137   match(Set dst (XorL src1 (RShiftL src2 src3)));
11138 
11139   ins_cost(1.9 * INSN_COST);
11140   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
11141 
11142   ins_encode %{
11143     __ eor(as_Register($dst$$reg),
11144               as_Register($src1$$reg),
11145               as_Register($src2$$reg),
11146               Assembler::ASR,
11147               $src3$$constant & 0x3f);
11148   %}
11149 
11150   ins_pipe(ialu_reg_reg_shift);
11151 %}
11152 
11153 instruct XorI_reg_LShift_reg(iRegINoSp dst,
11154                          iRegIorL2I src1, iRegIorL2I src2,
11155                          immI src3, rFlagsReg cr) %{
11156   match(Set dst (XorI src1 (LShiftI src2 src3)));
11157 
11158   ins_cost(1.9 * INSN_COST);
11159   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
11160 
11161   ins_encode %{
11162     __ eorw(as_Register($dst$$reg),
11163               as_Register($src1$$reg),
11164               as_Register($src2$$reg),
11165               Assembler::LSL,
11166               $src3$$constant & 0x1f);
11167   %}
11168 
11169   ins_pipe(ialu_reg_reg_shift);
11170 %}
11171 
11172 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
11173                          iRegL src1, iRegL src2,
11174                          immI src3, rFlagsReg cr) %{
11175   match(Set dst (XorL src1 (LShiftL src2 src3)));
11176 
11177   ins_cost(1.9 * INSN_COST);
11178   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
11179 
11180   ins_encode %{
11181     __ eor(as_Register($dst$$reg),
11182               as_Register($src1$$reg),
11183               as_Register($src2$$reg),
11184               Assembler::LSL,
11185               $src3$$constant & 0x3f);
11186   %}
11187 
11188   ins_pipe(ialu_reg_reg_shift);
11189 %}
11190 
11191 instruct OrI_reg_URShift_reg(iRegINoSp dst,
11192                          iRegIorL2I src1, iRegIorL2I src2,
11193                          immI src3, rFlagsReg cr) %{
11194   match(Set dst (OrI src1 (URShiftI src2 src3)));
11195 
11196   ins_cost(1.9 * INSN_COST);
11197   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
11198 
11199   ins_encode %{
11200     __ orrw(as_Register($dst$$reg),
11201               as_Register($src1$$reg),
11202               as_Register($src2$$reg),
11203               Assembler::LSR,
11204               $src3$$constant & 0x1f);
11205   %}
11206 
11207   ins_pipe(ialu_reg_reg_shift);
11208 %}
11209 
11210 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
11211                          iRegL src1, iRegL src2,
11212                          immI src3, rFlagsReg cr) %{
11213   match(Set dst (OrL src1 (URShiftL src2 src3)));
11214 
11215   ins_cost(1.9 * INSN_COST);
11216   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
11217 
11218   ins_encode %{
11219     __ orr(as_Register($dst$$reg),
11220               as_Register($src1$$reg),
11221               as_Register($src2$$reg),
11222               Assembler::LSR,
11223               $src3$$constant & 0x3f);
11224   %}
11225 
11226   ins_pipe(ialu_reg_reg_shift);
11227 %}
11228 
11229 instruct OrI_reg_RShift_reg(iRegINoSp dst,
11230                          iRegIorL2I src1, iRegIorL2I src2,
11231                          immI src3, rFlagsReg cr) %{
11232   match(Set dst (OrI src1 (RShiftI src2 src3)));
11233 
11234   ins_cost(1.9 * INSN_COST);
11235   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
11236 
11237   ins_encode %{
11238     __ orrw(as_Register($dst$$reg),
11239               as_Register($src1$$reg),
11240               as_Register($src2$$reg),
11241               Assembler::ASR,
11242               $src3$$constant & 0x1f);
11243   %}
11244 
11245   ins_pipe(ialu_reg_reg_shift);
11246 %}
11247 
11248 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
11249                          iRegL src1, iRegL src2,
11250                          immI src3, rFlagsReg cr) %{
11251   match(Set dst (OrL src1 (RShiftL src2 src3)));
11252 
11253   ins_cost(1.9 * INSN_COST);
11254   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
11255 
11256   ins_encode %{
11257     __ orr(as_Register($dst$$reg),
11258               as_Register($src1$$reg),
11259               as_Register($src2$$reg),
11260               Assembler::ASR,
11261               $src3$$constant & 0x3f);
11262   %}
11263 
11264   ins_pipe(ialu_reg_reg_shift);
11265 %}
11266 
11267 instruct OrI_reg_LShift_reg(iRegINoSp dst,
11268                          iRegIorL2I src1, iRegIorL2I src2,
11269                          immI src3, rFlagsReg cr) %{
11270   match(Set dst (OrI src1 (LShiftI src2 src3)));
11271 
11272   ins_cost(1.9 * INSN_COST);
11273   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
11274 
11275   ins_encode %{
11276     __ orrw(as_Register($dst$$reg),
11277               as_Register($src1$$reg),
11278               as_Register($src2$$reg),
11279               Assembler::LSL,
11280               $src3$$constant & 0x1f);
11281   %}
11282 
11283   ins_pipe(ialu_reg_reg_shift);
11284 %}
11285 
11286 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
11287                          iRegL src1, iRegL src2,
11288                          immI src3, rFlagsReg cr) %{
11289   match(Set dst (OrL src1 (LShiftL src2 src3)));
11290 
11291   ins_cost(1.9 * INSN_COST);
11292   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
11293 
11294   ins_encode %{
11295     __ orr(as_Register($dst$$reg),
11296               as_Register($src1$$reg),
11297               as_Register($src2$$reg),
11298               Assembler::LSL,
11299               $src3$$constant & 0x3f);
11300   %}
11301 
11302   ins_pipe(ialu_reg_reg_shift);
11303 %}
11304 
11305 instruct AddI_reg_URShift_reg(iRegINoSp dst,
11306                          iRegIorL2I src1, iRegIorL2I src2,
11307                          immI src3, rFlagsReg cr) %{
11308   match(Set dst (AddI src1 (URShiftI src2 src3)));
11309 
11310   ins_cost(1.9 * INSN_COST);
11311   format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
11312 
11313   ins_encode %{
11314     __ addw(as_Register($dst$$reg),
11315               as_Register($src1$$reg),
11316               as_Register($src2$$reg),
11317               Assembler::LSR,
11318               $src3$$constant & 0x1f);
11319   %}
11320 
11321   ins_pipe(ialu_reg_reg_shift);
11322 %}
11323 
11324 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
11325                          iRegL src1, iRegL src2,
11326                          immI src3, rFlagsReg cr) %{
11327   match(Set dst (AddL src1 (URShiftL src2 src3)));
11328 
11329   ins_cost(1.9 * INSN_COST);
11330   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
11331 
11332   ins_encode %{
11333     __ add(as_Register($dst$$reg),
11334               as_Register($src1$$reg),
11335               as_Register($src2$$reg),
11336               Assembler::LSR,
11337               $src3$$constant & 0x3f);
11338   %}
11339 
11340   ins_pipe(ialu_reg_reg_shift);
11341 %}
11342 
11343 instruct AddI_reg_RShift_reg(iRegINoSp dst,
11344                          iRegIorL2I src1, iRegIorL2I src2,
11345                          immI src3, rFlagsReg cr) %{
11346   match(Set dst (AddI src1 (RShiftI src2 src3)));
11347 
11348   ins_cost(1.9 * INSN_COST);
11349   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
11350 
11351   ins_encode %{
11352     __ addw(as_Register($dst$$reg),
11353               as_Register($src1$$reg),
11354               as_Register($src2$$reg),
11355               Assembler::ASR,
11356               $src3$$constant & 0x1f);
11357   %}
11358 
11359   ins_pipe(ialu_reg_reg_shift);
11360 %}
11361 
11362 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
11363                          iRegL src1, iRegL src2,
11364                          immI src3, rFlagsReg cr) %{
11365   match(Set dst (AddL src1 (RShiftL src2 src3)));
11366 
11367   ins_cost(1.9 * INSN_COST);
11368   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
11369 
11370   ins_encode %{
11371     __ add(as_Register($dst$$reg),
11372               as_Register($src1$$reg),
11373               as_Register($src2$$reg),
11374               Assembler::ASR,
11375               $src3$$constant & 0x3f);
11376   %}
11377 
11378   ins_pipe(ialu_reg_reg_shift);
11379 %}
11380 
11381 instruct AddI_reg_LShift_reg(iRegINoSp dst,
11382                          iRegIorL2I src1, iRegIorL2I src2,
11383                          immI src3, rFlagsReg cr) %{
11384   match(Set dst (AddI src1 (LShiftI src2 src3)));
11385 
11386   ins_cost(1.9 * INSN_COST);
11387   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
11388 
11389   ins_encode %{
11390     __ addw(as_Register($dst$$reg),
11391               as_Register($src1$$reg),
11392               as_Register($src2$$reg),
11393               Assembler::LSL,
11394               $src3$$constant & 0x1f);
11395   %}
11396 
11397   ins_pipe(ialu_reg_reg_shift);
11398 %}
11399 
11400 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
11401                          iRegL src1, iRegL src2,
11402                          immI src3, rFlagsReg cr) %{
11403   match(Set dst (AddL src1 (LShiftL src2 src3)));
11404 
11405   ins_cost(1.9 * INSN_COST);
11406   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
11407 
11408   ins_encode %{
11409     __ add(as_Register($dst$$reg),
11410               as_Register($src1$$reg),
11411               as_Register($src2$$reg),
11412               Assembler::LSL,
11413               $src3$$constant & 0x3f);
11414   %}
11415 
11416   ins_pipe(ialu_reg_reg_shift);
11417 %}
11418 
11419 instruct SubI_reg_URShift_reg(iRegINoSp dst,
11420                          iRegIorL2I src1, iRegIorL2I src2,
11421                          immI src3, rFlagsReg cr) %{
11422   match(Set dst (SubI src1 (URShiftI src2 src3)));
11423 
11424   ins_cost(1.9 * INSN_COST);
11425   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
11426 
11427   ins_encode %{
11428     __ subw(as_Register($dst$$reg),
11429               as_Register($src1$$reg),
11430               as_Register($src2$$reg),
11431               Assembler::LSR,
11432               $src3$$constant & 0x1f);
11433   %}
11434 
11435   ins_pipe(ialu_reg_reg_shift);
11436 %}
11437 
11438 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
11439                          iRegL src1, iRegL src2,
11440                          immI src3, rFlagsReg cr) %{
11441   match(Set dst (SubL src1 (URShiftL src2 src3)));
11442 
11443   ins_cost(1.9 * INSN_COST);
11444   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
11445 
11446   ins_encode %{
11447     __ sub(as_Register($dst$$reg),
11448               as_Register($src1$$reg),
11449               as_Register($src2$$reg),
11450               Assembler::LSR,
11451               $src3$$constant & 0x3f);
11452   %}
11453 
11454   ins_pipe(ialu_reg_reg_shift);
11455 %}
11456 
11457 instruct SubI_reg_RShift_reg(iRegINoSp dst,
11458                          iRegIorL2I src1, iRegIorL2I src2,
11459                          immI src3, rFlagsReg cr) %{
11460   match(Set dst (SubI src1 (RShiftI src2 src3)));
11461 
11462   ins_cost(1.9 * INSN_COST);
11463   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
11464 
11465   ins_encode %{
11466     __ subw(as_Register($dst$$reg),
11467               as_Register($src1$$reg),
11468               as_Register($src2$$reg),
11469               Assembler::ASR,
11470               $src3$$constant & 0x1f);
11471   %}
11472 
11473   ins_pipe(ialu_reg_reg_shift);
11474 %}
11475 
11476 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
11477                          iRegL src1, iRegL src2,
11478                          immI src3, rFlagsReg cr) %{
11479   match(Set dst (SubL src1 (RShiftL src2 src3)));
11480 
11481   ins_cost(1.9 * INSN_COST);
11482   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
11483 
11484   ins_encode %{
11485     __ sub(as_Register($dst$$reg),
11486               as_Register($src1$$reg),
11487               as_Register($src2$$reg),
11488               Assembler::ASR,
11489               $src3$$constant & 0x3f);
11490   %}
11491 
11492   ins_pipe(ialu_reg_reg_shift);
11493 %}
11494 
11495 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11496                          iRegIorL2I src1, iRegIorL2I src2,
11497                          immI src3, rFlagsReg cr) %{
11498   match(Set dst (SubI src1 (LShiftI src2 src3)));
11499 
11500   ins_cost(1.9 * INSN_COST);
11501   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11502 
11503   ins_encode %{
11504     __ subw(as_Register($dst$$reg),
11505               as_Register($src1$$reg),
11506               as_Register($src2$$reg),
11507               Assembler::LSL,
11508               $src3$$constant & 0x1f);
11509   %}
11510 
11511   ins_pipe(ialu_reg_reg_shift);
11512 %}
11513 
11514 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11515                          iRegL src1, iRegL src2,
11516                          immI src3, rFlagsReg cr) %{
11517   match(Set dst (SubL src1 (LShiftL src2 src3)));
11518 
11519   ins_cost(1.9 * INSN_COST);
11520   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11521 
11522   ins_encode %{
11523     __ sub(as_Register($dst$$reg),
11524               as_Register($src1$$reg),
11525               as_Register($src2$$reg),
11526               Assembler::LSL,
11527               $src3$$constant & 0x3f);
11528   %}
11529 
11530   ins_pipe(ialu_reg_reg_shift);
11531 %}
11532 
11533 
11534 
11535 // Shift Left followed by Shift Right.
11536 // This idiom is used by the compiler for the i2b bytecode etc.
11537 instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11538 %{
11539   match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
11540   // Make sure we are not going to exceed what sbfm can do.
11541   predicate((unsigned int)n->in(2)->get_int() <= 63
11542             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11543 
11544   ins_cost(INSN_COST * 2);
11545   format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11546   ins_encode %{
11547     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11548     int s = 63 - lshift;
11549     int r = (rshift - lshift) & 63;
11550     __ sbfm(as_Register($dst$$reg),
11551             as_Register($src$$reg),
11552             r, s);
11553   %}
11554 
11555   ins_pipe(ialu_reg_shift);
11556 %}
11557 
11558 // Shift Left followed by Shift Right.
11559 // This idiom is used by the compiler for the i2b bytecode etc.
11560 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11561 %{
11562   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
11563   // Make sure we are not going to exceed what sbfmw can do.
11564   predicate((unsigned int)n->in(2)->get_int() <= 31
11565             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11566 
11567   ins_cost(INSN_COST * 2);
11568   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11569   ins_encode %{
11570     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11571     int s = 31 - lshift;
11572     int r = (rshift - lshift) & 31;
11573     __ sbfmw(as_Register($dst$$reg),
11574             as_Register($src$$reg),
11575             r, s);
11576   %}
11577 
11578   ins_pipe(ialu_reg_shift);
11579 %}
11580 
11581 // Shift Left followed by Shift Right.
11582 // This idiom is used by the compiler for the i2b bytecode etc.
11583 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11584 %{
11585   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
11586   // Make sure we are not going to exceed what ubfm can do.
11587   predicate((unsigned int)n->in(2)->get_int() <= 63
11588             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11589 
11590   ins_cost(INSN_COST * 2);
11591   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11592   ins_encode %{
11593     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11594     int s = 63 - lshift;
11595     int r = (rshift - lshift) & 63;
11596     __ ubfm(as_Register($dst$$reg),
11597             as_Register($src$$reg),
11598             r, s);
11599   %}
11600 
11601   ins_pipe(ialu_reg_shift);
11602 %}
11603 
11604 // Shift Left followed by Shift Right.
11605 // This idiom is used by the compiler for the i2b bytecode etc.
11606 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11607 %{
11608   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11609   // Make sure we are not going to exceed what ubfmw can do.
11610   predicate((unsigned int)n->in(2)->get_int() <= 31
11611             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11612 
11613   ins_cost(INSN_COST * 2);
11614   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11615   ins_encode %{
11616     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11617     int s = 31 - lshift;
11618     int r = (rshift - lshift) & 31;
11619     __ ubfmw(as_Register($dst$$reg),
11620             as_Register($src$$reg),
11621             r, s);
11622   %}
11623 
11624   ins_pipe(ialu_reg_shift);
11625 %}
11626 // Bitfield extract with shift & mask
11627 
11628 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11629 %{
11630   match(Set dst (AndI (URShiftI src rshift) mask));
11631 
11632   ins_cost(INSN_COST);
11633   format %{ "ubfxw $dst, $src, $mask" %}
11634   ins_encode %{
11635     int rshift = $rshift$$constant;
11636     long mask = $mask$$constant;
11637     int width = exact_log2(mask+1);
11638     __ ubfxw(as_Register($dst$$reg),
11639             as_Register($src$$reg), rshift, width);
11640   %}
11641   ins_pipe(ialu_reg_shift);
11642 %}
11643 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
11644 %{
11645   match(Set dst (AndL (URShiftL src rshift) mask));
11646 
11647   ins_cost(INSN_COST);
11648   format %{ "ubfx $dst, $src, $mask" %}
11649   ins_encode %{
11650     int rshift = $rshift$$constant;
11651     long mask = $mask$$constant;
11652     int width = exact_log2(mask+1);
11653     __ ubfx(as_Register($dst$$reg),
11654             as_Register($src$$reg), rshift, width);
11655   %}
11656   ins_pipe(ialu_reg_shift);
11657 %}
11658 
11659 // We can use ubfx when extending an And with a mask when we know mask
11660 // is positive.  We know that because immI_bitmask guarantees it.
11661 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11662 %{
11663   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
11664 
11665   ins_cost(INSN_COST * 2);
11666   format %{ "ubfx $dst, $src, $mask" %}
11667   ins_encode %{
11668     int rshift = $rshift$$constant;
11669     long mask = $mask$$constant;
11670     int width = exact_log2(mask+1);
11671     __ ubfx(as_Register($dst$$reg),
11672             as_Register($src$$reg), rshift, width);
11673   %}
11674   ins_pipe(ialu_reg_shift);
11675 %}
11676 
11677 // Rotations
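// extr extracts a register-width field from the concatenation src1:src2
// starting at bit rshift of src2. An Or/Add of a left shift and a right
// shift whose counts sum to the register width is exactly that, and when
// src1 == src2 it degenerates to a rotate right.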
11678 
11679 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
11680 %{
11681   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
11682   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
11683 
11684   ins_cost(INSN_COST);
11685   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11686 
11687   ins_encode %{
11688     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11689             $rshift$$constant & 63);
11690   %}
11691   ins_pipe(ialu_reg_reg_extr);
11692 %}
11693 
11694 instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11695 %{
11696   match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11697   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11698 
11699   ins_cost(INSN_COST);
11700   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11701 
11702   ins_encode %{
11703     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11704             $rshift$$constant & 31);
11705   %}
11706   ins_pipe(ialu_reg_reg_extr);
11707 %}
11708 
11709 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
11710 %{
11711   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
11712   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
11713 
11714   ins_cost(INSN_COST);
11715   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11716 
11717   ins_encode %{
11718     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11719             $rshift$$constant & 63);
11720   %}
11721   ins_pipe(ialu_reg_reg_extr);
11722 %}
11723 
11724 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
11725 %{
11726   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
11727   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
11728 
11729   ins_cost(INSN_COST);
11730   format %{ "extr $dst, $src1, $src2, #$rshift" %}
11731 
11732   ins_encode %{
11733     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
11734             $rshift$$constant & 31);
11735   %}
11736   ins_pipe(ialu_reg_reg_extr);
11737 %}
11738 
11739 
11740 // rol expander
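// There is no rol instruction; rotate left by s equals rotate right by
// (width - s), and rorv takes its count modulo the width, so negating
// the shift (subw from zr) and rotating right does the job.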
11741 
11742 instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
11743 %{
11744   effect(DEF dst, USE src, USE shift);
11745 
11746   format %{ "rol    $dst, $src, $shift" %}
11747   ins_cost(INSN_COST * 3);
11748   ins_encode %{
11749     __ subw(rscratch1, zr, as_Register($shift$$reg));
11750     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
11751             rscratch1);
11752     %}
11753   ins_pipe(ialu_reg_reg_vshift);
11754 %}
11755 
11756 // rol expander
11757 
11758 instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
11759 %{
11760   effect(DEF dst, USE src, USE shift);
11761 
11762   format %{ "rol    $dst, $src, $shift" %}
11763   ins_cost(INSN_COST * 3);
11764   ins_encode %{
11765     __ subw(rscratch1, zr, as_Register($shift$$reg));
11766     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
11767             rscratch1);
11768     %}
11769   ins_pipe(ialu_reg_reg_vshift);
11770 %}
11771 
11772 instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
11773 %{
11774   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
11775 
11776   expand %{
11777     rolL_rReg(dst, src, shift, cr);
11778   %}
11779 %}
11780 
11781 instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
11782 %{
11783   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
11784 
11785   expand %{
11786     rolL_rReg(dst, src, shift, cr);
11787   %}
11788 %}
11789 
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
11791 %{
11792   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
11793 
11794   expand %{
    rolI_rReg(dst, src, shift, cr);
11796   %}
11797 %}
11798 
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
11800 %{
11801   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
11802 
11803   expand %{
    rolI_rReg(dst, src, shift, cr);
11805   %}
11806 %}
11807 
11808 // ror expander
11809 
11810 instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
11811 %{
11812   effect(DEF dst, USE src, USE shift);
11813 
11814   format %{ "ror    $dst, $src, $shift" %}
11815   ins_cost(INSN_COST);
11816   ins_encode %{
11817     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
11818             as_Register($shift$$reg));
11819     %}
11820   ins_pipe(ialu_reg_reg_vshift);
11821 %}
11822 
11823 // ror expander
11824 
11825 instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
11826 %{
11827   effect(DEF dst, USE src, USE shift);
11828 
11829   format %{ "ror    $dst, $src, $shift" %}
11830   ins_cost(INSN_COST);
11831   ins_encode %{
11832     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
11833             as_Register($shift$$reg));
11834     %}
11835   ins_pipe(ialu_reg_reg_vshift);
11836 %}
11837 
11838 instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
11839 %{
11840   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));
11841 
11842   expand %{
11843     rorL_rReg(dst, src, shift, cr);
11844   %}
11845 %}
11846 
11847 instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
11848 %{
11849   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));
11850 
11851   expand %{
11852     rorL_rReg(dst, src, shift, cr);
11853   %}
11854 %}
11855 
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11864 
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11873 
11874 // Add/subtract (extended)
11875 
11876 instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
11877 %{
11878   match(Set dst (AddL src1 (ConvI2L src2)));
11879   ins_cost(INSN_COST);
11880   format %{ "add  $dst, $src1, sxtw $src2" %}
11881 
11882    ins_encode %{
11883      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11884             as_Register($src2$$reg), ext::sxtw);
11885    %}
11886   ins_pipe(ialu_reg_reg);
11887 %};
11888 
11889 instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
11890 %{
11891   match(Set dst (SubL src1 (ConvI2L src2)));
11892   ins_cost(INSN_COST);
11893   format %{ "sub  $dst, $src1, sxtw $src2" %}
11894 
11895    ins_encode %{
11896      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
11897             as_Register($src2$$reg), ext::sxtw);
11898    %}
11899   ins_pipe(ialu_reg_reg);
11900 %};
11901 
11902 
11903 instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
11904 %{
11905   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
11906   ins_cost(INSN_COST);
11907   format %{ "add  $dst, $src1, sxth $src2" %}
11908 
11909    ins_encode %{
11910      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11911             as_Register($src2$$reg), ext::sxth);
11912    %}
11913   ins_pipe(ialu_reg_reg);
11914 %}
11915 
11916 instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
11917 %{
11918   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
11919   ins_cost(INSN_COST);
11920   format %{ "add  $dst, $src1, sxtb $src2" %}
11921 
11922    ins_encode %{
11923      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11924             as_Register($src2$$reg), ext::sxtb);
11925    %}
11926   ins_pipe(ialu_reg_reg);
11927 %}
11928 
11929 instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
11930 %{
11931   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
11932   ins_cost(INSN_COST);
11933   format %{ "add  $dst, $src1, uxtb $src2" %}
11934 
11935    ins_encode %{
11936      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11937             as_Register($src2$$reg), ext::uxtb);
11938    %}
11939   ins_pipe(ialu_reg_reg);
11940 %}
11941 
11942 instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
11943 %{
11944   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11945   ins_cost(INSN_COST);
11946   format %{ "add  $dst, $src1, sxth $src2" %}
11947 
11948    ins_encode %{
11949      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11950             as_Register($src2$$reg), ext::sxth);
11951    %}
11952   ins_pipe(ialu_reg_reg);
11953 %}
11954 
11955 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
11956 %{
11957   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11958   ins_cost(INSN_COST);
11959   format %{ "add  $dst, $src1, sxtw $src2" %}
11960 
11961    ins_encode %{
11962      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11963             as_Register($src2$$reg), ext::sxtw);
11964    %}
11965   ins_pipe(ialu_reg_reg);
11966 %}
11967 
11968 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
11969 %{
11970   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
11971   ins_cost(INSN_COST);
11972   format %{ "add  $dst, $src1, sxtb $src2" %}
11973 
11974    ins_encode %{
11975      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11976             as_Register($src2$$reg), ext::sxtb);
11977    %}
11978   ins_pipe(ialu_reg_reg);
11979 %}
11980 
11981 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
11982 %{
11983   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
11984   ins_cost(INSN_COST);
11985   format %{ "add  $dst, $src1, uxtb $src2" %}
11986 
11987    ins_encode %{
11988      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
11989             as_Register($src2$$reg), ext::uxtb);
11990    %}
11991   ins_pipe(ialu_reg_reg);
11992 %}
11993 
11994 
11995 instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
11996 %{
11997   match(Set dst (AddI src1 (AndI src2 mask)));
11998   ins_cost(INSN_COST);
11999   format %{ "addw  $dst, $src1, $src2, uxtb" %}
12000 
12001    ins_encode %{
12002      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12003             as_Register($src2$$reg), ext::uxtb);
12004    %}
12005   ins_pipe(ialu_reg_reg);
12006 %}
12007 
12008 instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
12009 %{
12010   match(Set dst (AddI src1 (AndI src2 mask)));
12011   ins_cost(INSN_COST);
12012   format %{ "addw  $dst, $src1, $src2, uxth" %}
12013 
12014    ins_encode %{
12015      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12016             as_Register($src2$$reg), ext::uxth);
12017    %}
12018   ins_pipe(ialu_reg_reg);
12019 %}
12020 
12021 instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
12022 %{
12023   match(Set dst (AddL src1 (AndL src2 mask)));
12024   ins_cost(INSN_COST);
12025   format %{ "add  $dst, $src1, $src2, uxtb" %}
12026 
12027    ins_encode %{
12028      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12029             as_Register($src2$$reg), ext::uxtb);
12030    %}
12031   ins_pipe(ialu_reg_reg);
12032 %}
12033 
12034 instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
12035 %{
12036   match(Set dst (AddL src1 (AndL src2 mask)));
12037   ins_cost(INSN_COST);
12038   format %{ "add  $dst, $src1, $src2, uxth" %}
12039 
12040    ins_encode %{
12041      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12042             as_Register($src2$$reg), ext::uxth);
12043    %}
12044   ins_pipe(ialu_reg_reg);
12045 %}
12046 
12047 instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12048 %{
12049   match(Set dst (AddL src1 (AndL src2 mask)));
12050   ins_cost(INSN_COST);
12051   format %{ "add  $dst, $src1, $src2, uxtw" %}
12052 
12053    ins_encode %{
12054      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12055             as_Register($src2$$reg), ext::uxtw);
12056    %}
12057   ins_pipe(ialu_reg_reg);
12058 %}
12059 
12060 instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
12061 %{
12062   match(Set dst (SubI src1 (AndI src2 mask)));
12063   ins_cost(INSN_COST);
12064   format %{ "subw  $dst, $src1, $src2, uxtb" %}
12065 
12066    ins_encode %{
12067      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12068             as_Register($src2$$reg), ext::uxtb);
12069    %}
12070   ins_pipe(ialu_reg_reg);
12071 %}
12072 
12073 instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
12074 %{
12075   match(Set dst (SubI src1 (AndI src2 mask)));
12076   ins_cost(INSN_COST);
12077   format %{ "subw  $dst, $src1, $src2, uxth" %}
12078 
12079    ins_encode %{
12080      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12081             as_Register($src2$$reg), ext::uxth);
12082    %}
12083   ins_pipe(ialu_reg_reg);
12084 %}
12085 
12086 instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
12087 %{
12088   match(Set dst (SubL src1 (AndL src2 mask)));
12089   ins_cost(INSN_COST);
12090   format %{ "sub  $dst, $src1, $src2, uxtb" %}
12091 
12092    ins_encode %{
12093      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12094             as_Register($src2$$reg), ext::uxtb);
12095    %}
12096   ins_pipe(ialu_reg_reg);
12097 %}
12098 
12099 instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
12100 %{
12101   match(Set dst (SubL src1 (AndL src2 mask)));
12102   ins_cost(INSN_COST);
12103   format %{ "sub  $dst, $src1, $src2, uxth" %}
12104 
12105    ins_encode %{
12106      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12107             as_Register($src2$$reg), ext::uxth);
12108    %}
12109   ins_pipe(ialu_reg_reg);
12110 %}
12111 
12112 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12113 %{
12114   match(Set dst (SubL src1 (AndL src2 mask)));
12115   ins_cost(INSN_COST);
12116   format %{ "sub  $dst, $src1, $src2, uxtw" %}
12117 
12118    ins_encode %{
12119      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12120             as_Register($src2$$reg), ext::uxtw);
12121    %}
12122   ins_pipe(ialu_reg_reg);
12123 %}
12124 
12125 // END This section of the file is automatically generated. Do not edit --------------
12126 
12127 // ============================================================================
12128 // Floating Point Arithmetic Instructions
12129 
12130 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12131   match(Set dst (AddF src1 src2));
12132 
12133   ins_cost(INSN_COST * 5);
12134   format %{ "fadds   $dst, $src1, $src2" %}
12135 
12136   ins_encode %{
12137     __ fadds(as_FloatRegister($dst$$reg),
12138              as_FloatRegister($src1$$reg),
12139              as_FloatRegister($src2$$reg));
12140   %}
12141 
12142   ins_pipe(pipe_class_default);
12143 %}
12144 
12145 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12146   match(Set dst (AddD src1 src2));
12147 
12148   ins_cost(INSN_COST * 5);
12149   format %{ "faddd   $dst, $src1, $src2" %}
12150 
12151   ins_encode %{
12152     __ faddd(as_FloatRegister($dst$$reg),
12153              as_FloatRegister($src1$$reg),
12154              as_FloatRegister($src2$$reg));
12155   %}
12156 
12157   ins_pipe(pipe_class_default);
12158 %}
12159 
12160 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12161   match(Set dst (SubF src1 src2));
12162 
12163   ins_cost(INSN_COST * 5);
12164   format %{ "fsubs   $dst, $src1, $src2" %}
12165 
12166   ins_encode %{
12167     __ fsubs(as_FloatRegister($dst$$reg),
12168              as_FloatRegister($src1$$reg),
12169              as_FloatRegister($src2$$reg));
12170   %}
12171 
12172   ins_pipe(pipe_class_default);
12173 %}
12174 
12175 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12176   match(Set dst (SubD src1 src2));
12177 
12178   ins_cost(INSN_COST * 5);
12179   format %{ "fsubd   $dst, $src1, $src2" %}
12180 
12181   ins_encode %{
12182     __ fsubd(as_FloatRegister($dst$$reg),
12183              as_FloatRegister($src1$$reg),
12184              as_FloatRegister($src2$$reg));
12185   %}
12186 
12187   ins_pipe(pipe_class_default);
12188 %}
12189 
12190 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12191   match(Set dst (MulF src1 src2));
12192 
12193   ins_cost(INSN_COST * 6);
12194   format %{ "fmuls   $dst, $src1, $src2" %}
12195 
12196   ins_encode %{
12197     __ fmuls(as_FloatRegister($dst$$reg),
12198              as_FloatRegister($src1$$reg),
12199              as_FloatRegister($src2$$reg));
12200   %}
12201 
12202   ins_pipe(pipe_class_default);
12203 %}
12204 
12205 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12206   match(Set dst (MulD src1 src2));
12207 
12208   ins_cost(INSN_COST * 6);
12209   format %{ "fmuld   $dst, $src1, $src2" %}
12210 
12211   ins_encode %{
12212     __ fmuld(as_FloatRegister($dst$$reg),
12213              as_FloatRegister($src1$$reg),
12214              as_FloatRegister($src2$$reg));
12215   %}
12216 
12217   ins_pipe(pipe_class_default);
12218 %}
12219 
// We cannot use these fused mul with add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
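//
// A minimal standalone C++ sketch (not part of this file, using the C
// library fmaf in place of fmadds) of how the results can differ:
//
//   #include <cmath>
//   #include <cstdio>
//   int main() {
//     float a = 1.0f + 0x1.0p-23f;           // 1 + 2^-23
//     float b = 1.0f - 0x1.0p-23f;           // 1 - 2^-23
//     volatile float p = a * b;              // 1 - 2^-46 rounds to 1.0f
//     float separated = p - 1.0f;            // == 0.0f
//     float fused = std::fmaf(a, b, -1.0f);  // single rounding: -2^-46
//     std::printf("%g %g\n", separated, fused);  // 0 vs ~ -1.42e-14
//     return 0;
//   }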
12225 
12226 
12227 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12228 //   match(Set dst (AddF (MulF src1 src2) src3));
12229 
12230 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12231 
12232 //   ins_encode %{
12233 //     __ fmadds(as_FloatRegister($dst$$reg),
12234 //              as_FloatRegister($src1$$reg),
12235 //              as_FloatRegister($src2$$reg),
12236 //              as_FloatRegister($src3$$reg));
12237 //   %}
12238 
12239 //   ins_pipe(pipe_class_default);
12240 // %}
12241 
12242 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12243 //   match(Set dst (AddD (MulD src1 src2) src3));
12244 
12245 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12246 
12247 //   ins_encode %{
12248 //     __ fmaddd(as_FloatRegister($dst$$reg),
12249 //              as_FloatRegister($src1$$reg),
12250 //              as_FloatRegister($src2$$reg),
12251 //              as_FloatRegister($src3$$reg));
12252 //   %}
12253 
12254 //   ins_pipe(pipe_class_default);
12255 // %}
12256 
12257 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12258 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12259 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12260 
12261 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12262 
12263 //   ins_encode %{
12264 //     __ fmsubs(as_FloatRegister($dst$$reg),
12265 //               as_FloatRegister($src1$$reg),
12266 //               as_FloatRegister($src2$$reg),
12267 //              as_FloatRegister($src3$$reg));
12268 //   %}
12269 
12270 //   ins_pipe(pipe_class_default);
12271 // %}
12272 
12273 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12274 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12275 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12276 
12277 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12278 
12279 //   ins_encode %{
12280 //     __ fmsubd(as_FloatRegister($dst$$reg),
12281 //               as_FloatRegister($src1$$reg),
12282 //               as_FloatRegister($src2$$reg),
12283 //               as_FloatRegister($src3$$reg));
12284 //   %}
12285 
12286 //   ins_pipe(pipe_class_default);
12287 // %}
12288 
12289 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12290 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12291 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12292 
12293 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12294 
12295 //   ins_encode %{
12296 //     __ fnmadds(as_FloatRegister($dst$$reg),
12297 //                as_FloatRegister($src1$$reg),
12298 //                as_FloatRegister($src2$$reg),
12299 //                as_FloatRegister($src3$$reg));
12300 //   %}
12301 
12302 //   ins_pipe(pipe_class_default);
12303 // %}
12304 
12305 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12306 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12307 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12308 
12309 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12310 
12311 //   ins_encode %{
12312 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12313 //                as_FloatRegister($src1$$reg),
12314 //                as_FloatRegister($src2$$reg),
12315 //                as_FloatRegister($src3$$reg));
12316 //   %}
12317 
12318 //   ins_pipe(pipe_class_default);
12319 // %}
12320 
12321 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12322 //   match(Set dst (SubF (MulF src1 src2) src3));
12323 
12324 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12325 
12326 //   ins_encode %{
12327 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12328 //                as_FloatRegister($src1$$reg),
12329 //                as_FloatRegister($src2$$reg),
12330 //                as_FloatRegister($src3$$reg));
12331 //   %}
12332 
12333 //   ins_pipe(pipe_class_default);
12334 // %}
12335 
12336 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12337 //   match(Set dst (SubD (MulD src1 src2) src3));
12338 
12339 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12340 
12341 //   ins_encode %{
//     __ fnmsubd(as_FloatRegister($dst$$reg),
12344 //                as_FloatRegister($src1$$reg),
12345 //                as_FloatRegister($src2$$reg),
12346 //                as_FloatRegister($src3$$reg));
12347 //   %}
12348 
12349 //   ins_pipe(pipe_class_default);
12350 // %}
12351 
12352 
12353 instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12354   match(Set dst (DivF src1  src2));
12355 
12356   ins_cost(INSN_COST * 18);
12357   format %{ "fdivs   $dst, $src1, $src2" %}
12358 
12359   ins_encode %{
12360     __ fdivs(as_FloatRegister($dst$$reg),
12361              as_FloatRegister($src1$$reg),
12362              as_FloatRegister($src2$$reg));
12363   %}
12364 
12365   ins_pipe(pipe_class_default);
12366 %}
12367 
12368 instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12369   match(Set dst (DivD src1  src2));
12370 
12371   ins_cost(INSN_COST * 32);
12372   format %{ "fdivd   $dst, $src1, $src2" %}
12373 
12374   ins_encode %{
12375     __ fdivd(as_FloatRegister($dst$$reg),
12376              as_FloatRegister($src1$$reg),
12377              as_FloatRegister($src2$$reg));
12378   %}
12379 
12380   ins_pipe(pipe_class_default);
12381 %}
12382 
12383 instruct negF_reg_reg(vRegF dst, vRegF src) %{
12384   match(Set dst (NegF src));
12385 
12386   ins_cost(INSN_COST * 3);
12387   format %{ "fneg   $dst, $src" %}
12388 
12389   ins_encode %{
12390     __ fnegs(as_FloatRegister($dst$$reg),
12391              as_FloatRegister($src$$reg));
12392   %}
12393 
12394   ins_pipe(pipe_class_default);
12395 %}
12396 
12397 instruct negD_reg_reg(vRegD dst, vRegD src) %{
12398   match(Set dst (NegD src));
12399 
12400   ins_cost(INSN_COST * 3);
12401   format %{ "fnegd   $dst, $src" %}
12402 
12403   ins_encode %{
12404     __ fnegd(as_FloatRegister($dst$$reg),
12405              as_FloatRegister($src$$reg));
12406   %}
12407 
12408   ins_pipe(pipe_class_default);
12409 %}
12410 
12411 instruct absF_reg(vRegF dst, vRegF src) %{
12412   match(Set dst (AbsF src));
12413 
12414   ins_cost(INSN_COST * 3);
12415   format %{ "fabss   $dst, $src" %}
12416   ins_encode %{
12417     __ fabss(as_FloatRegister($dst$$reg),
12418              as_FloatRegister($src$$reg));
12419   %}
12420 
12421   ins_pipe(pipe_class_default);
12422 %}
12423 
12424 instruct absD_reg(vRegD dst, vRegD src) %{
12425   match(Set dst (AbsD src));
12426 
12427   ins_cost(INSN_COST * 3);
12428   format %{ "fabsd   $dst, $src" %}
12429   ins_encode %{
12430     __ fabsd(as_FloatRegister($dst$$reg),
12431              as_FloatRegister($src$$reg));
12432   %}
12433 
12434   ins_pipe(pipe_class_default);
12435 %}
12436 
12437 instruct sqrtD_reg(vRegD dst, vRegD src) %{
12438   match(Set dst (SqrtD src));
12439 
12440   ins_cost(INSN_COST * 50);
12441   format %{ "fsqrtd  $dst, $src" %}
12442   ins_encode %{
12443     __ fsqrtd(as_FloatRegister($dst$$reg),
12444              as_FloatRegister($src$$reg));
12445   %}
12446 
12447   ins_pipe(pipe_class_default);
12448 %}
12449 
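// Single-precision sqrt reaches the matcher as ConvD2F(SqrtD(ConvF2D src)).
// Matching that shape directly to fsqrts is safe: the widening ConvF2D is
// exact and sqrt is correctly rounded, so rounding the double result back
// to float agrees with computing in float throughout (the usual
// double-rounding argument, since 53 >= 2 * 24 + 2).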
12450 instruct sqrtF_reg(vRegF dst, vRegF src) %{
12451   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
12452 
12453   ins_cost(INSN_COST * 50);
12454   format %{ "fsqrts  $dst, $src" %}
12455   ins_encode %{
12456     __ fsqrts(as_FloatRegister($dst$$reg),
12457              as_FloatRegister($src$$reg));
12458   %}
12459 
12460   ins_pipe(pipe_class_default);
12461 %}
12462 
12463 // ============================================================================
12464 // Logical Instructions
12465 
12466 // Integer Logical Instructions
12467 
12468 // And Instructions
12469 
12470 
12471 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
12472   match(Set dst (AndI src1 src2));
12473 
12474   format %{ "andw  $dst, $src1, $src2\t# int" %}
12475 
12476   ins_cost(INSN_COST);
12477   ins_encode %{
12478     __ andw(as_Register($dst$$reg),
12479             as_Register($src1$$reg),
12480             as_Register($src2$$reg));
12481   %}
12482 
12483   ins_pipe(ialu_reg_reg);
12484 %}
12485 
12486 instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
12487   match(Set dst (AndI src1 src2));
12488 
12489   format %{ "andsw  $dst, $src1, $src2\t# int" %}
12490 
12491   ins_cost(INSN_COST);
12492   ins_encode %{
12493     __ andw(as_Register($dst$$reg),
12494             as_Register($src1$$reg),
12495             (unsigned long)($src2$$constant));
12496   %}
12497 
12498   ins_pipe(ialu_reg_imm);
12499 %}
12500 
12501 // Or Instructions
12502 
12503 instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
12504   match(Set dst (OrI src1 src2));
12505 
12506   format %{ "orrw  $dst, $src1, $src2\t# int" %}
12507 
12508   ins_cost(INSN_COST);
12509   ins_encode %{
12510     __ orrw(as_Register($dst$$reg),
12511             as_Register($src1$$reg),
12512             as_Register($src2$$reg));
12513   %}
12514 
12515   ins_pipe(ialu_reg_reg);
12516 %}
12517 
12518 instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
12519   match(Set dst (OrI src1 src2));
12520 
12521   format %{ "orrw  $dst, $src1, $src2\t# int" %}
12522 
12523   ins_cost(INSN_COST);
12524   ins_encode %{
12525     __ orrw(as_Register($dst$$reg),
12526             as_Register($src1$$reg),
12527             (unsigned long)($src2$$constant));
12528   %}
12529 
12530   ins_pipe(ialu_reg_imm);
12531 %}
12532 
12533 // Xor Instructions
12534 
12535 instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
12536   match(Set dst (XorI src1 src2));
12537 
12538   format %{ "eorw  $dst, $src1, $src2\t# int" %}
12539 
12540   ins_cost(INSN_COST);
12541   ins_encode %{
12542     __ eorw(as_Register($dst$$reg),
12543             as_Register($src1$$reg),
12544             as_Register($src2$$reg));
12545   %}
12546 
12547   ins_pipe(ialu_reg_reg);
12548 %}
12549 
12550 instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
12551   match(Set dst (XorI src1 src2));
12552 
12553   format %{ "eorw  $dst, $src1, $src2\t# int" %}
12554 
12555   ins_cost(INSN_COST);
12556   ins_encode %{
12557     __ eorw(as_Register($dst$$reg),
12558             as_Register($src1$$reg),
12559             (unsigned long)($src2$$constant));
12560   %}
12561 
12562   ins_pipe(ialu_reg_imm);
12563 %}
12564 
12565 // Long Logical Instructions
12566 // TODO
12567 
12568 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
12569   match(Set dst (AndL src1 src2));
12570 
12571   format %{ "and  $dst, $src1, $src2\t# int" %}
12572 
12573   ins_cost(INSN_COST);
12574   ins_encode %{
12575     __ andr(as_Register($dst$$reg),
12576             as_Register($src1$$reg),
12577             as_Register($src2$$reg));
12578   %}
12579 
12580   ins_pipe(ialu_reg_reg);
12581 %}
12582 
12583 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
12584   match(Set dst (AndL src1 src2));
12585 
12586   format %{ "and  $dst, $src1, $src2\t# int" %}
12587 
12588   ins_cost(INSN_COST);
12589   ins_encode %{
12590     __ andr(as_Register($dst$$reg),
12591             as_Register($src1$$reg),
12592             (unsigned long)($src2$$constant));
12593   %}
12594 
12595   ins_pipe(ialu_reg_imm);
12596 %}
12597 
12598 // Or Instructions
12599 
12600 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12601   match(Set dst (OrL src1 src2));
12602 
12603   format %{ "orr  $dst, $src1, $src2\t# int" %}
12604 
12605   ins_cost(INSN_COST);
12606   ins_encode %{
12607     __ orr(as_Register($dst$$reg),
12608            as_Register($src1$$reg),
12609            as_Register($src2$$reg));
12610   %}
12611 
12612   ins_pipe(ialu_reg_reg);
12613 %}
12614 
12615 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
12616   match(Set dst (OrL src1 src2));
12617 
12618   format %{ "orr  $dst, $src1, $src2\t# int" %}
12619 
12620   ins_cost(INSN_COST);
12621   ins_encode %{
12622     __ orr(as_Register($dst$$reg),
12623            as_Register($src1$$reg),
12624            (unsigned long)($src2$$constant));
12625   %}
12626 
12627   ins_pipe(ialu_reg_imm);
12628 %}
12629 
12630 // Xor Instructions
12631 
12632 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12633   match(Set dst (XorL src1 src2));
12634 
12635   format %{ "eor  $dst, $src1, $src2\t# int" %}
12636 
12637   ins_cost(INSN_COST);
12638   ins_encode %{
12639     __ eor(as_Register($dst$$reg),
12640            as_Register($src1$$reg),
12641            as_Register($src2$$reg));
12642   %}
12643 
12644   ins_pipe(ialu_reg_reg);
12645 %}
12646 
12647 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
12648   match(Set dst (XorL src1 src2));
12649 
12650   ins_cost(INSN_COST);
12651   format %{ "eor  $dst, $src1, $src2\t# int" %}
12652 
12653   ins_encode %{
12654     __ eor(as_Register($dst$$reg),
12655            as_Register($src1$$reg),
12656            (unsigned long)($src2$$constant));
12657   %}
12658 
12659   ins_pipe(ialu_reg_imm);
12660 %}
12661 
12662 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
12663 %{
12664   match(Set dst (ConvI2L src));
12665 
12666   ins_cost(INSN_COST);
12667   format %{ "sxtw  $dst, $src\t# i2l" %}
12668   ins_encode %{
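    // sbfm with immr == 0 and imms == 31 is the encoding behind the sxtw alias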
12669     __ sbfm($dst$$Register, $src$$Register, 0, 31);
12670   %}
12671   ins_pipe(ialu_reg_shift);
12672 %}
12673 
// This pattern occurs in bigmath arithmetic
12675 instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
12676 %{
12677   match(Set dst (AndL (ConvI2L src) mask));
12678 
12679   ins_cost(INSN_COST);
12680   format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
12681   ins_encode %{
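    // ubfm with immr == 0 and imms == 31 zero-extends the low 32 bits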
12682     __ ubfm($dst$$Register, $src$$Register, 0, 31);
12683   %}
12684 
12685   ins_pipe(ialu_reg_shift);
12686 %}
12687 
12688 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
12689   match(Set dst (ConvL2I src));
12690 
12691   ins_cost(INSN_COST);
12692   format %{ "movw  $dst, $src \t// l2i" %}
12693 
12694   ins_encode %{
12695     __ movw(as_Register($dst$$reg), as_Register($src$$reg));
12696   %}
12697 
12698   ins_pipe(ialu_reg);
12699 %}
12700 
12701 instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
12702 %{
12703   match(Set dst (Conv2B src));
12704   effect(KILL cr);
12705 
12706   format %{
12707     "cmpw $src, zr\n\t"
12708     "cset $dst, ne"
12709   %}
12710 
12711   ins_encode %{
12712     __ cmpw(as_Register($src$$reg), zr);
12713     __ cset(as_Register($dst$$reg), Assembler::NE);
12714   %}
12715 
12716   ins_pipe(ialu_reg);
12717 %}
12718 
12719 instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
12720 %{
12721   match(Set dst (Conv2B src));
12722   effect(KILL cr);
12723 
12724   format %{
12725     "cmp  $src, zr\n\t"
12726     "cset $dst, ne"
12727   %}
12728 
12729   ins_encode %{
12730     __ cmp(as_Register($src$$reg), zr);
12731     __ cset(as_Register($dst$$reg), Assembler::NE);
12732   %}
12733 
12734   ins_pipe(ialu_reg);
12735 %}
12736 
12737 instruct convD2F_reg(vRegF dst, vRegD src) %{
12738   match(Set dst (ConvD2F src));
12739 
12740   ins_cost(INSN_COST * 5);
12741   format %{ "fcvtd  $dst, $src \t// d2f" %}
12742 
12743   ins_encode %{
12744     __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
12745   %}
12746 
12747   ins_pipe(pipe_class_default);
12748 %}
12749 
12750 instruct convF2D_reg(vRegD dst, vRegF src) %{
12751   match(Set dst (ConvF2D src));
12752 
12753   ins_cost(INSN_COST * 5);
12754   format %{ "fcvts  $dst, $src \t// f2d" %}
12755 
12756   ins_encode %{
12757     __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
12758   %}
12759 
12760   ins_pipe(pipe_class_default);
12761 %}
12762 
12763 instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
12764   match(Set dst (ConvF2I src));
12765 
12766   ins_cost(INSN_COST * 5);
12767   format %{ "fcvtzsw  $dst, $src \t// f2i" %}
12768 
12769   ins_encode %{
12770     __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
12771   %}
12772 
12773   ins_pipe(pipe_class_default);
12774 %}
12775 
12776 instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
12777   match(Set dst (ConvF2L src));
12778 
12779   ins_cost(INSN_COST * 5);
12780   format %{ "fcvtzs  $dst, $src \t// f2l" %}
12781 
12782   ins_encode %{
12783     __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
12784   %}
12785 
12786   ins_pipe(pipe_class_default);
12787 %}
12788 
12789 instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
12790   match(Set dst (ConvI2F src));
12791 
12792   ins_cost(INSN_COST * 5);
12793   format %{ "scvtfws  $dst, $src \t// i2f" %}
12794 
12795   ins_encode %{
12796     __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
12797   %}
12798 
12799   ins_pipe(pipe_class_default);
12800 %}
12801 
12802 instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
12803   match(Set dst (ConvL2F src));
12804 
12805   ins_cost(INSN_COST * 5);
12806   format %{ "scvtfs  $dst, $src \t// l2f" %}
12807 
12808   ins_encode %{
12809     __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
12810   %}
12811 
12812   ins_pipe(pipe_class_default);
12813 %}
12814 
12815 instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
12816   match(Set dst (ConvD2I src));
12817 
12818   ins_cost(INSN_COST * 5);
12819   format %{ "fcvtzdw  $dst, $src \t// d2i" %}
12820 
12821   ins_encode %{
12822     __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
12823   %}
12824 
12825   ins_pipe(pipe_class_default);
12826 %}
12827 
12828 instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
12829   match(Set dst (ConvD2L src));
12830 
12831   ins_cost(INSN_COST * 5);
12832   format %{ "fcvtzd  $dst, $src \t// d2l" %}
12833 
12834   ins_encode %{
12835     __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
12836   %}
12837 
12838   ins_pipe(pipe_class_default);
12839 %}
12840 
12841 instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
12842   match(Set dst (ConvI2D src));
12843 
12844   ins_cost(INSN_COST * 5);
12845   format %{ "scvtfwd  $dst, $src \t// i2d" %}
12846 
12847   ins_encode %{
12848     __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
12849   %}
12850 
12851   ins_pipe(pipe_class_default);
12852 %}
12853 
12854 instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
12855   match(Set dst (ConvL2D src));
12856 
12857   ins_cost(INSN_COST * 5);
12858   format %{ "scvtfd  $dst, $src \t// l2d" %}
12859 
12860   ins_encode %{
12861     __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
12862   %}
12863 
12864   ins_pipe(pipe_class_default);
12865 %}
12866 
12867 // stack <-> reg and reg <-> reg shuffles with no conversion
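// The reg <-> reg forms below use fmov to copy the raw bits between a
// general and a floating-point register; the stack forms reload a
// spilled value with a load or store of the other type.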
12868 
12869 instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
12870 
12871   match(Set dst (MoveF2I src));
12872 
12873   effect(DEF dst, USE src);
12874 
12875   ins_cost(4 * INSN_COST);
12876 
12877   format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}
12878 
12879   ins_encode %{
12880     __ ldrw($dst$$Register, Address(sp, $src$$disp));
12881   %}
12882 
12883   ins_pipe(iload_reg_reg);
12884 
12885 %}
12886 
12887 instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{
12888 
12889   match(Set dst (MoveI2F src));
12890 
12891   effect(DEF dst, USE src);
12892 
12893   ins_cost(4 * INSN_COST);
12894 
12895   format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}
12896 
12897   ins_encode %{
12898     __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
12899   %}
12900 
12901   ins_pipe(pipe_class_memory);
12902 
12903 %}
12904 
12905 instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
12906 
12907   match(Set dst (MoveD2L src));
12908 
12909   effect(DEF dst, USE src);
12910 
12911   ins_cost(4 * INSN_COST);
12912 
12913   format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}
12914 
12915   ins_encode %{
12916     __ ldr($dst$$Register, Address(sp, $src$$disp));
12917   %}
12918 
12919   ins_pipe(iload_reg_reg);
12920 
12921 %}
12922 
12923 instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{
12924 
12925   match(Set dst (MoveL2D src));
12926 
12927   effect(DEF dst, USE src);
12928 
12929   ins_cost(4 * INSN_COST);
12930 
12931   format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}
12932 
12933   ins_encode %{
12934     __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
12935   %}
12936 
12937   ins_pipe(pipe_class_memory);
12938 
12939 %}
12940 
12941 instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{
12942 
12943   match(Set dst (MoveF2I src));
12944 
12945   effect(DEF dst, USE src);
12946 
12947   ins_cost(INSN_COST);
12948 
12949   format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}
12950 
12951   ins_encode %{
12952     __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
12953   %}
12954 
12955   ins_pipe(pipe_class_memory);
12956 
12957 %}
12958 
12959 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
12960 
12961   match(Set dst (MoveI2F src));
12962 
12963   effect(DEF dst, USE src);
12964 
12965   ins_cost(INSN_COST);
12966 
12967   format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}
12968 
12969   ins_encode %{
12970     __ strw($src$$Register, Address(sp, $dst$$disp));
12971   %}
12972 
12973   ins_pipe(istore_reg_reg);
12974 
12975 %}
12976 
12977 instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{
12978 
12979   match(Set dst (MoveD2L src));
12980 
12981   effect(DEF dst, USE src);
12982 
12983   ins_cost(INSN_COST);
12984 
12985   format %{ "strd $dst, $src\t# MoveD2L_reg_stack" %}
12986 
12987   ins_encode %{
12988     __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
12989   %}
12990 
12991   ins_pipe(pipe_class_memory);
12992 
12993 %}
12994 
12995 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
12996 
12997   match(Set dst (MoveL2D src));
12998 
12999   effect(DEF dst, USE src);
13000 
13001   ins_cost(INSN_COST);
13002 
13003   format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}
13004 
13005   ins_encode %{
13006     __ str($src$$Register, Address(sp, $dst$$disp));
13007   %}
13008 
13009   ins_pipe(istore_reg_reg);
13010 
13011 %}
13012 
13013 instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{
13014 
13015   match(Set dst (MoveF2I src));
13016 
13017   effect(DEF dst, USE src);
13018 
13019   ins_cost(INSN_COST);
13020 
13021   format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}
13022 
13023   ins_encode %{
13024     __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
13025   %}
13026 
13027   ins_pipe(pipe_class_memory);
13028 
13029 %}
13030 
13031 instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{
13032 
13033   match(Set dst (MoveI2F src));
13034 
13035   effect(DEF dst, USE src);
13036 
13037   ins_cost(INSN_COST);
13038 
13039   format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}
13040 
13041   ins_encode %{
13042     __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
13043   %}
13044 
13045   ins_pipe(pipe_class_memory);
13046 
13047 %}
13048 
13049 instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
13050 
13051   match(Set dst (MoveD2L src));
13052 
13053   effect(DEF dst, USE src);
13054 
13055   ins_cost(INSN_COST);
13056 
13057   format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}
13058 
13059   ins_encode %{
13060     __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
13061   %}
13062 
13063   ins_pipe(pipe_class_memory);
13064 
13065 %}
13066 
13067 instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{
13068 
13069   match(Set dst (MoveL2D src));
13070 
13071   effect(DEF dst, USE src);
13072 
13073   ins_cost(INSN_COST);
13074 
13075   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
13076 
13077   ins_encode %{
13078     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
13079   %}
13080 
13081   ins_pipe(pipe_class_memory);
13082 
13083 %}
13084 
13085 // ============================================================================
// Clearing of an array
13087 
13088 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13089 %{
13090   match(Set dummy (ClearArray cnt base));
13091   effect(USE_KILL cnt, USE_KILL base);
13092 
13093   ins_cost(4 * INSN_COST);
13094   format %{ "ClearArray $cnt, $base" %}
13095 
13096   ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));
13097 
13098   ins_pipe(pipe_class_memory);
13099 %}
13100 
13101 // ============================================================================
13102 // Overflow Math Instructions
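//
// For add and subtract the hardware sets the V flag directly, so an
// overflow check is simply a flag-setting cmn (add) or cmp (subtract)
// whose arithmetic result is discarded. Multiply has no flag-setting
// form and needs the longer sequences further down.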
13103 
13104 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13105 %{
13106   match(Set cr (OverflowAddI op1 op2));
13107 
13108   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13109   ins_cost(INSN_COST);
13110   ins_encode %{
13111     __ cmnw($op1$$Register, $op2$$Register);
13112   %}
13113 
13114   ins_pipe(icmp_reg_reg);
13115 %}
13116 
13117 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13118 %{
13119   match(Set cr (OverflowAddI op1 op2));
13120 
13121   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13122   ins_cost(INSN_COST);
13123   ins_encode %{
13124     __ cmnw($op1$$Register, $op2$$constant);
13125   %}
13126 
13127   ins_pipe(icmp_reg_imm);
13128 %}
13129 
13130 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13131 %{
13132   match(Set cr (OverflowAddL op1 op2));
13133 
13134   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13135   ins_cost(INSN_COST);
13136   ins_encode %{
13137     __ cmn($op1$$Register, $op2$$Register);
13138   %}
13139 
13140   ins_pipe(icmp_reg_reg);
13141 %}
13142 
13143 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13144 %{
13145   match(Set cr (OverflowAddL op1 op2));
13146 
13147   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13148   ins_cost(INSN_COST);
13149   ins_encode %{
13150     __ cmn($op1$$Register, $op2$$constant);
13151   %}
13152 
13153   ins_pipe(icmp_reg_imm);
13154 %}
13155 
13156 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13157 %{
13158   match(Set cr (OverflowSubI op1 op2));
13159 
13160   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13161   ins_cost(INSN_COST);
13162   ins_encode %{
13163     __ cmpw($op1$$Register, $op2$$Register);
13164   %}
13165 
13166   ins_pipe(icmp_reg_reg);
13167 %}
13168 
13169 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13170 %{
13171   match(Set cr (OverflowSubI op1 op2));
13172 
13173   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13174   ins_cost(INSN_COST);
13175   ins_encode %{
13176     __ cmpw($op1$$Register, $op2$$constant);
13177   %}
13178 
13179   ins_pipe(icmp_reg_imm);
13180 %}
13181 
13182 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13183 %{
13184   match(Set cr (OverflowSubL op1 op2));
13185 
13186   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13187   ins_cost(INSN_COST);
13188   ins_encode %{
13189     __ cmp($op1$$Register, $op2$$Register);
13190   %}
13191 
13192   ins_pipe(icmp_reg_reg);
13193 %}
13194 
13195 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13196 %{
13197   match(Set cr (OverflowSubL op1 op2));
13198 
13199   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13200   ins_cost(INSN_COST);
13201   ins_encode %{
13202     __ cmp($op1$$Register, $op2$$constant);
13203   %}
13204 
13205   ins_pipe(icmp_reg_imm);
13206 %}
13207 
13208 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13209 %{
13210   match(Set cr (OverflowSubI zero op1));
13211 
13212   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13213   ins_cost(INSN_COST);
13214   ins_encode %{
13215     __ cmpw(zr, $op1$$Register);
13216   %}
13217 
13218   ins_pipe(icmp_reg_imm);
13219 %}
13220 
13221 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13222 %{
13223   match(Set cr (OverflowSubL zero op1));
13224 
13225   format %{ "cmp   zr, $op1\t# overflow check long" %}
13226   ins_cost(INSN_COST);
13227   ins_encode %{
13228     __ cmp(zr, $op1$$Register);
13229   %}
13230 
13231   ins_pipe(icmp_reg_imm);
13232 %}
13233 
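// The 32 x 32 multiply is done in 64 bits with smull; the result
// overflows an int exactly when it differs from the sign extension of
// its own low 32 bits. The trailing movw/cselw/cmpw sequence converts
// that EQ/NE answer into the V flag so a normal overflow cmpOp can
// test it (0x80000000 - 1 sets V, 0 - 1 does not).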
13234 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13235 %{
13236   match(Set cr (OverflowMulI op1 op2));
13237 
13238   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13239             "cmp   rscratch1, rscratch1, sxtw\n\t"
13240             "movw  rscratch1, #0x80000000\n\t"
13241             "cselw rscratch1, rscratch1, zr, NE\n\t"
13242             "cmpw  rscratch1, #1" %}
13243   ins_cost(5 * INSN_COST);
13244   ins_encode %{
13245     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13246     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13247     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13248     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13249     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13250   %}
13251 
13252   ins_pipe(pipe_slow);
13253 %}
13254 
13255 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
13256 %{
13257   match(If cmp (OverflowMulI op1 op2));
13258   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13259             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13260   effect(USE labl, KILL cr);
13261 
13262   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13263             "cmp   rscratch1, rscratch1, sxtw\n\t"
13264             "b$cmp   $labl" %}
13265   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
13266   ins_encode %{
13267     Label* L = $labl$$label;
13268     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13269     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13270     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13271     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13272   %}
13273 
13274   ins_pipe(pipe_serial);
13275 %}
13276 
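// The 64 x 64 multiply forms the full 128-bit product with mul (low
// half) and smulh (high half); it overflows a long exactly when the
// high half differs from the sign extension of the low half, i.e.
// rscratch2 != rscratch1 ASR 63.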
13277 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13278 %{
13279   match(Set cr (OverflowMulL op1 op2));
13280 
13281   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13282             "smulh rscratch2, $op1, $op2\n\t"
13283             "cmp   rscratch2, rscratch1, ASR #31\n\t"
13284             "movw  rscratch1, #0x80000000\n\t"
13285             "cselw rscratch1, rscratch1, zr, NE\n\t"
13286             "cmpw  rscratch1, #1" %}
13287   ins_cost(6 * INSN_COST);
13288   ins_encode %{
13289     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13290     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
13292     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13293     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13294     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13295   %}
13296 
13297   ins_pipe(pipe_slow);
13298 %}
13299 
13300 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
13301 %{
13302   match(If cmp (OverflowMulL op1 op2));
13303   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13304             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13305   effect(USE labl, KILL cr);
13306 
13307   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13308             "smulh rscratch2, $op1, $op2\n\t"
13309             "cmp   rscratch2, rscratch1, ASR #31\n\t"
13310             "b$cmp $labl" %}
13311   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
13312   ins_encode %{
13313     Label* L = $labl$$label;
13314     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13315     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13316     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
13318     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13319   %}
13320 
13321   ins_pipe(pipe_serial);
13322 %}
13323 
13324 // ============================================================================
13325 // Compare Instructions
13326 
13327 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
13328 %{
13329   match(Set cr (CmpI op1 op2));
13330 
13331   effect(DEF cr, USE op1, USE op2);
13332 
13333   ins_cost(INSN_COST);
13334   format %{ "cmpw  $op1, $op2" %}
13335 
13336   ins_encode(aarch64_enc_cmpw(op1, op2));
13337 
13338   ins_pipe(icmp_reg_reg);
13339 %}
13340 
13341 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
13342 %{
13343   match(Set cr (CmpI op1 zero));
13344 
13345   effect(DEF cr, USE op1);
13346 
13347   ins_cost(INSN_COST);
13348   format %{ "cmpw $op1, 0" %}
13349 
13350   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13351 
13352   ins_pipe(icmp_reg_imm);
13353 %}
13354 
13355 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
13356 %{
13357   match(Set cr (CmpI op1 op2));
13358 
13359   effect(DEF cr, USE op1);
13360 
13361   ins_cost(INSN_COST);
13362   format %{ "cmpw  $op1, $op2" %}
13363 
13364   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13365 
13366   ins_pipe(icmp_reg_imm);
13367 %}
13368 
13369 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
13370 %{
13371   match(Set cr (CmpI op1 op2));
13372 
13373   effect(DEF cr, USE op1);
13374 
13375   ins_cost(INSN_COST * 2);
13376   format %{ "cmpw  $op1, $op2" %}
13377 
13378   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13379 
13380   ins_pipe(icmp_reg_imm);
13381 %}
13382 
// Unsigned compare Instructions; really the same as signed compare
// except that the result should only be used to feed an If or a CMovI
// which takes a cmpOpU.
13386 
13387 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
13388 %{
13389   match(Set cr (CmpU op1 op2));
13390 
13391   effect(DEF cr, USE op1, USE op2);
13392 
13393   ins_cost(INSN_COST);
13394   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13395 
13396   ins_encode(aarch64_enc_cmpw(op1, op2));
13397 
13398   ins_pipe(icmp_reg_reg);
13399 %}
13400 
13401 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
13402 %{
13403   match(Set cr (CmpU op1 zero));
13404 
13405   effect(DEF cr, USE op1);
13406 
13407   ins_cost(INSN_COST);
13408   format %{ "cmpw $op1, #0\t# unsigned" %}
13409 
13410   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13411 
13412   ins_pipe(icmp_reg_imm);
13413 %}
13414 
13415 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
13416 %{
13417   match(Set cr (CmpU op1 op2));
13418 
13419   effect(DEF cr, USE op1);
13420 
13421   ins_cost(INSN_COST);
13422   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13423 
13424   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13425 
13426   ins_pipe(icmp_reg_imm);
13427 %}
13428 
13429 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
13430 %{
13431   match(Set cr (CmpU op1 op2));
13432 
13433   effect(DEF cr, USE op1);
13434 
13435   ins_cost(INSN_COST * 2);
13436   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13437 
13438   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13439 
13440   ins_pipe(icmp_reg_imm);
13441 %}
13442 
13443 instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13444 %{
13445   match(Set cr (CmpL op1 op2));
13446 
13447   effect(DEF cr, USE op1, USE op2);
13448 
13449   ins_cost(INSN_COST);
13450   format %{ "cmp  $op1, $op2" %}
13451 
13452   ins_encode(aarch64_enc_cmp(op1, op2));
13453 
13454   ins_pipe(icmp_reg_reg);
13455 %}
13456 
13457 instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
13458 %{
13459   match(Set cr (CmpL op1 zero));
13460 
13461   effect(DEF cr, USE op1);
13462 
13463   ins_cost(INSN_COST);
13464   format %{ "tst  $op1" %}
13465 
13466   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
13467 
13468   ins_pipe(icmp_reg_imm);
13469 %}
13470 
13471 instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
13472 %{
13473   match(Set cr (CmpL op1 op2));
13474 
13475   effect(DEF cr, USE op1);
13476 
13477   ins_cost(INSN_COST);
13478   format %{ "cmp  $op1, $op2" %}
13479 
13480   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
13481 
13482   ins_pipe(icmp_reg_imm);
13483 %}
13484 
13485 instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
13486 %{
13487   match(Set cr (CmpL op1 op2));
13488 
13489   effect(DEF cr, USE op1);
13490 
13491   ins_cost(INSN_COST * 2);
13492   format %{ "cmp  $op1, $op2" %}
13493 
13494   ins_encode(aarch64_enc_cmp_imm(op1, op2));
13495 
13496   ins_pipe(icmp_reg_imm);
13497 %}
13498 
13499 instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
13500 %{
13501   match(Set cr (CmpP op1 op2));
13502 
13503   effect(DEF cr, USE op1, USE op2);
13504 
13505   ins_cost(INSN_COST);
13506   format %{ "cmp  $op1, $op2\t // ptr" %}
13507 
13508   ins_encode(aarch64_enc_cmpp(op1, op2));
13509 
13510   ins_pipe(icmp_reg_reg);
13511 %}
13512 
13513 instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
13514 %{
13515   match(Set cr (CmpN op1 op2));
13516 
13517   effect(DEF cr, USE op1, USE op2);
13518 
13519   ins_cost(INSN_COST);
13520   format %{ "cmp  $op1, $op2\t // compressed ptr" %}
13521 
13522   ins_encode(aarch64_enc_cmpn(op1, op2));
13523 
13524   ins_pipe(icmp_reg_reg);
13525 %}
13526 
13527 instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
13528 %{
13529   match(Set cr (CmpP op1 zero));
13530 
13531   effect(DEF cr, USE op1, USE zero);
13532 
13533   ins_cost(INSN_COST);
13534   format %{ "cmp  $op1, 0\t // ptr" %}
13535 
13536   ins_encode(aarch64_enc_testp(op1));
13537 
13538   ins_pipe(icmp_reg_imm);
13539 %}
13540 
13541 instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
13542 %{
13543   match(Set cr (CmpN op1 zero));
13544 
13545   effect(DEF cr, USE op1, USE zero);
13546 
13547   ins_cost(INSN_COST);
13548   format %{ "cmp  $op1, 0\t // compressed ptr" %}
13549 
13550   ins_encode(aarch64_enc_testn(op1));
13551 
13552   ins_pipe(icmp_reg_imm);
13553 %}
13554 
13555 // FP comparisons
13556 //
13557 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13558 // using normal cmpOp. See declaration of rFlagsReg for details.
13559 
13560 instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
13561 %{
13562   match(Set cr (CmpF src1 src2));
13563 
13564   ins_cost(3 * INSN_COST);
13565   format %{ "fcmps $src1, $src2" %}
13566 
13567   ins_encode %{
13568     __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
13569   %}
13570 
13571   ins_pipe(pipe_class_compare);
13572 %}
13573 
13574 instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
13575 %{
13576   match(Set cr (CmpF src1 src2));
13577 
13578   ins_cost(3 * INSN_COST);
13579   format %{ "fcmps $src1, 0.0" %}
13580 
13581   ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
13583   %}
13584 
13585   ins_pipe(pipe_class_compare);
13586 %}
13588 
13589 instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
13590 %{
13591   match(Set cr (CmpD src1 src2));
13592 
13593   ins_cost(3 * INSN_COST);
13594   format %{ "fcmpd $src1, $src2" %}
13595 
13596   ins_encode %{
13597     __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
13598   %}
13599 
13600   ins_pipe(pipe_class_compare);
13601 %}
13602 
13603 instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
13604 %{
13605   match(Set cr (CmpD src1 src2));
13606 
13607   ins_cost(3 * INSN_COST);
13608   format %{ "fcmpd $src1, 0.0" %}
13609 
13610   ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
13612   %}
13613 
13614   ins_pipe(pipe_class_compare);
13615 %}
13616 
13617 instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
13618 %{
13619   match(Set dst (CmpF3 src1 src2));
13620   effect(KILL cr);
13621 
13622   ins_cost(5 * INSN_COST);
13623   format %{ "fcmps $src1, $src2\n\t"
13624             "csinvw($dst, zr, zr, eq\n\t"
13625             "csnegw($dst, $dst, $dst, lt)"
13626   %}
13627 
13628   ins_encode %{
13629     Label done;
13630     FloatRegister s1 = as_FloatRegister($src1$$reg);
13631     FloatRegister s2 = as_FloatRegister($src2$$reg);
13632     Register d = as_Register($dst$$reg);
13633     __ fcmps(s1, s2);
13634     // installs 0 if EQ else -1
13635     __ csinvw(d, zr, zr, Assembler::EQ);
13636     // keeps -1 if less or unordered else installs 1
13637     __ csnegw(d, d, d, Assembler::LT);
13638     __ bind(done);
13639   %}
13640 
13641   ins_pipe(pipe_class_default);
13642 
13643 %}
13644 
13645 instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
13646 %{
13647   match(Set dst (CmpD3 src1 src2));
13648   effect(KILL cr);
13649 
13650   ins_cost(5 * INSN_COST);
13651   format %{ "fcmpd $src1, $src2\n\t"
13652             "csinvw($dst, zr, zr, eq\n\t"
13653             "csnegw($dst, $dst, $dst, lt)"
13654   %}
13655 
13656   ins_encode %{
13657     Label done;
13658     FloatRegister s1 = as_FloatRegister($src1$$reg);
13659     FloatRegister s2 = as_FloatRegister($src2$$reg);
13660     Register d = as_Register($dst$$reg);
13661     __ fcmpd(s1, s2);
13662     // installs 0 if EQ else -1
13663     __ csinvw(d, zr, zr, Assembler::EQ);
13664     // keeps -1 if less or unordered else installs 1
13665     __ csnegw(d, d, d, Assembler::LT);
13666     __ bind(done);
13667   %}
13668   ins_pipe(pipe_class_default);
13669 
13670 %}
13671 
13672 instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
13673 %{
13674   match(Set dst (CmpF3 src1 zero));
13675   effect(KILL cr);
13676 
13677   ins_cost(5 * INSN_COST);
13678   format %{ "fcmps $src1, 0.0\n\t"
13679             "csinvw($dst, zr, zr, eq\n\t"
13680             "csnegw($dst, $dst, $dst, lt)"
13681   %}
13682 
13683   ins_encode %{
13684     Label done;
13685     FloatRegister s1 = as_FloatRegister($src1$$reg);
13686     Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
13688     // installs 0 if EQ else -1
13689     __ csinvw(d, zr, zr, Assembler::EQ);
13690     // keeps -1 if less or unordered else installs 1
13691     __ csnegw(d, d, d, Assembler::LT);
13692     __ bind(done);
13693   %}
13694 
13695   ins_pipe(pipe_class_default);
13696 
13697 %}
13698 
13699 instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
13700 %{
13701   match(Set dst (CmpD3 src1 zero));
13702   effect(KILL cr);
13703 
13704   ins_cost(5 * INSN_COST);
13705   format %{ "fcmpd $src1, 0.0\n\t"
13706             "csinvw($dst, zr, zr, eq\n\t"
13707             "csnegw($dst, $dst, $dst, lt)"
13708   %}
13709 
  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
13721   ins_pipe(pipe_class_default);
13722 
13723 %}
13724 
13725 instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
13726 %{
13727   match(Set dst (CmpLTMask p q));
13728   effect(KILL cr);
13729 
13730   ins_cost(3 * INSN_COST);
13731 
13732   format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
13733             "csetw $dst, lt\n\t"
13734             "subw $dst, zr, $dst"
13735   %}
13736 
13737   ins_encode %{
13738     __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
13739     __ csetw(as_Register($dst$$reg), Assembler::LT);
13740     __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
13741   %}
13742 
13743   ins_pipe(ialu_reg_reg);
13744 %}
13745 
13746 instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
13747 %{
13748   match(Set dst (CmpLTMask src zero));
13749   effect(KILL cr);
13750 
13751   ins_cost(INSN_COST);
13752 
13753   format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}
13754 
13755   ins_encode %{
13756     __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
13757   %}
13758 
13759   ins_pipe(ialu_reg_shift);
13760 %}
13761 
13762 // ============================================================================
13763 // Max and Min
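
// AArch64 has no scalar integer min/max instructions, so MinI and
// MaxI are matched to a compare followed by a conditional select.  A
// sketch for min (register names illustrative):
//
//   cmpw  w1, w2
//   cselw w0, w1, w2, lt   // w0 = (w1 < w2) ? w1 : w2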
13764 
13765 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
13766 %{
13767   match(Set dst (MinI src1 src2));
13768 
13769   effect(DEF dst, USE src1, USE src2, KILL cr);
13770   size(8);
13771 
13772   ins_cost(INSN_COST * 3);
13773   format %{
13774     "cmpw $src1 $src2\t signed int\n\t"
13775     "cselw $dst, $src1, $src2 lt\t"
13776   %}
13777 
13778   ins_encode %{
13779     __ cmpw(as_Register($src1$$reg),
13780             as_Register($src2$$reg));
13781     __ cselw(as_Register($dst$$reg),
13782              as_Register($src1$$reg),
13783              as_Register($src2$$reg),
13784              Assembler::LT);
13785   %}
13786 
13787   ins_pipe(ialu_reg_reg);
13788 %}
13790 
13791 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
13792 %{
13793   match(Set dst (MaxI src1 src2));
13794 
13795   effect(DEF dst, USE src1, USE src2, KILL cr);
13796   size(8);
13797 
13798   ins_cost(INSN_COST * 3);
13799   format %{
13800     "cmpw $src1 $src2\t signed int\n\t"
13801     "cselw $dst, $src1, $src2 gt\t"
13802   %}
13803 
13804   ins_encode %{
13805     __ cmpw(as_Register($src1$$reg),
13806             as_Register($src2$$reg));
13807     __ cselw(as_Register($dst$$reg),
13808              as_Register($src1$$reg),
13809              as_Register($src2$$reg),
13810              Assembler::GT);
13811   %}
13812 
13813   ins_pipe(ialu_reg_reg);
13814 %}
13815 
13816 // ============================================================================
13817 // Branch Instructions
13818 
13819 // Direct Branch.
13820 instruct branch(label lbl)
13821 %{
13822   match(Goto);
13823 
13824   effect(USE lbl);
13825 
13826   ins_cost(BRANCH_COST);
13827   format %{ "b  $lbl" %}
13828 
13829   ins_encode(aarch64_enc_b(lbl));
13830 
13831   ins_pipe(pipe_branch);
13832 %}
13833 
13834 // Conditional Near Branch
13835 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
13836 %{
13837   // Same match rule as `branchConFar'.
13838   match(If cmp cr);
13839 
13840   effect(USE lbl);
13841 
13842   ins_cost(BRANCH_COST);
13843   // If set to 1 this indicates that the current instruction is a
13844   // short variant of a long branch. This avoids using this
13845   // instruction in first-pass matching. It will then only be used in
13846   // the `Shorten_branches' pass.
13847   // ins_short_branch(1);
13848   format %{ "b$cmp  $lbl" %}
13849 
13850   ins_encode(aarch64_enc_br_con(cmp, lbl));
13851 
13852   ins_pipe(pipe_branch_cond);
13853 %}
13854 
13855 // Conditional Near Branch Unsigned
13856 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
13857 %{
13858   // Same match rule as `branchConFar'.
13859   match(If cmp cr);
13860 
13861   effect(USE lbl);
13862 
13863   ins_cost(BRANCH_COST);
13864   // If set to 1 this indicates that the current instruction is a
13865   // short variant of a long branch. This avoids using this
13866   // instruction in first-pass matching. It will then only be used in
13867   // the `Shorten_branches' pass.
13868   // ins_short_branch(1);
13869   format %{ "b$cmp  $lbl\t# unsigned" %}
13870 
13871   ins_encode(aarch64_enc_br_conU(cmp, lbl));
13872 
13873   ins_pipe(pipe_branch_cond);
13874 %}
13875 
13876 // Make use of CBZ and CBNZ.  These instructions, as well as being
13877 // shorter than (cmp; branch), have the additional benefit of not
13878 // killing the flags.
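//
// For example, where a generic rule would emit
//
//   cmp  w0, #0
//   b.eq L
//
// the rules below collapse the zero test and the branch into
//
//   cbz  w0, L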
13879 
13880 instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
13881   match(If cmp (CmpI op1 op2));
13882   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
13883             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
13884   effect(USE labl);
13885 
13886   ins_cost(BRANCH_COST);
13887   format %{ "cbw$cmp   $op1, $labl" %}
13888   ins_encode %{
13889     Label* L = $labl$$label;
13890     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13891     if (cond == Assembler::EQ)
13892       __ cbzw($op1$$Register, *L);
13893     else
13894       __ cbnzw($op1$$Register, *L);
13895   %}
13896   ins_pipe(pipe_cmp_branch);
13897 %}
13898 
13899 instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
13900   match(If cmp (CmpL op1 op2));
13901   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
13902             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
13903   effect(USE labl);
13904 
13905   ins_cost(BRANCH_COST);
13906   format %{ "cb$cmp   $op1, $labl" %}
13907   ins_encode %{
13908     Label* L = $labl$$label;
13909     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13910     if (cond == Assembler::EQ)
13911       __ cbz($op1$$Register, *L);
13912     else
13913       __ cbnz($op1$$Register, *L);
13914   %}
13915   ins_pipe(pipe_cmp_branch);
13916 %}
13917 
13918 instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
13919   match(If cmp (CmpP op1 op2));
13920   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
13921             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
13922   effect(USE labl);
13923 
13924   ins_cost(BRANCH_COST);
13925   format %{ "cb$cmp   $op1, $labl" %}
13926   ins_encode %{
13927     Label* L = $labl$$label;
13928     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13929     if (cond == Assembler::EQ)
13930       __ cbz($op1$$Register, *L);
13931     else
13932       __ cbnz($op1$$Register, *L);
13933   %}
13934   ins_pipe(pipe_cmp_branch);
13935 %}
13936 
13937 // Conditional Far Branch
13938 // Conditional Far Branch Unsigned
13939 // TODO: fixme
13940 
13941 // counted loop end branch near
13942 instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
13943 %{
13944   match(CountedLoopEnd cmp cr);
13945 
13946   effect(USE lbl);
13947 
13948   ins_cost(BRANCH_COST);
13949   // short variant.
13950   // ins_short_branch(1);
13951   format %{ "b$cmp $lbl \t// counted loop end" %}
13952 
13953   ins_encode(aarch64_enc_br_con(cmp, lbl));
13954 
13955   ins_pipe(pipe_branch);
13956 %}
13957 
13958 // counted loop end branch near Unsigned
13959 instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
13960 %{
13961   match(CountedLoopEnd cmp cr);
13962 
13963   effect(USE lbl);
13964 
13965   ins_cost(BRANCH_COST);
13966   // short variant.
13967   // ins_short_branch(1);
13968   format %{ "b$cmp $lbl \t// counted loop end unsigned" %}
13969 
13970   ins_encode(aarch64_enc_br_conU(cmp, lbl));
13971 
13972   ins_pipe(pipe_branch);
13973 %}
13974 
13975 // counted loop end branch far
13976 // counted loop end branch far unsigned
13977 // TODO: fixme
13978 
13979 // ============================================================================
13980 // inlined locking and unlocking
13981 
13982 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
13983 %{
13984   match(Set cr (FastLock object box));
13985   effect(TEMP tmp, TEMP tmp2);
13986 
13987   // TODO
13988   // identify correct cost
13989   ins_cost(5 * INSN_COST);
13990   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
13991 
13992   ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));
13993 
13994   ins_pipe(pipe_serial);
13995 %}
13996 
13997 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
13998 %{
13999   match(Set cr (FastUnlock object box));
14000   effect(TEMP tmp, TEMP tmp2);
14001 
14002   ins_cost(5 * INSN_COST);
14003   format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
14004 
14005   ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));
14006 
14007   ins_pipe(pipe_serial);
14008 %}
14009 
14010 
14011 // ============================================================================
14012 // Safepoint Instructions
14013 
14014 // TODO
14015 // provide a near and far version of this code
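//
// The poll is a load from the VM's polling page that discards its
// result (ldrw into zr).  When a safepoint is requested the VM
// protects that page, so the load faults and the signal handler
// diverts the thread to the safepoint code; no explicit test or
// branch is emitted here.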
14016 
14017 instruct safePoint(iRegP poll)
14018 %{
14019   match(SafePoint poll);
14020 
14021   format %{
14022     "ldrw zr, [$poll]\t# Safepoint: poll for GC"
14023   %}
14024   ins_encode %{
14025     __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
14026   %}
14027   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
14028 %}
14029 
14030 
14031 // ============================================================================
14032 // Procedure Call/Return Instructions
14033 
14034 // Call Java Static Instruction
14035 
14036 instruct CallStaticJavaDirect(method meth)
14037 %{
14038   match(CallStaticJava);
14039 
14040   effect(USE meth);
14041 
14042   ins_cost(CALL_COST);
14043 
14044   format %{ "call,static $meth \t// ==> " %}
14045 
14046   ins_encode( aarch64_enc_java_static_call(meth),
14047               aarch64_enc_call_epilog );
14048 
14049   ins_pipe(pipe_class_call);
14050 %}
14051 
14053 
14054 // Call Java Dynamic Instruction
14055 instruct CallDynamicJavaDirect(method meth)
14056 %{
14057   match(CallDynamicJava);
14058 
14059   effect(USE meth);
14060 
14061   ins_cost(CALL_COST);
14062 
14063   format %{ "CALL,dynamic $meth \t// ==> " %}
14064 
14065   ins_encode( aarch64_enc_java_dynamic_call(meth),
14066                aarch64_enc_call_epilog );
14067 
14068   ins_pipe(pipe_class_call);
14069 %}
14070 
14071 // Call Runtime Instruction
14072 
14073 instruct CallRuntimeDirect(method meth)
14074 %{
14075   match(CallRuntime);
14076 
14077   effect(USE meth);
14078 
14079   ins_cost(CALL_COST);
14080 
14081   format %{ "CALL, runtime $meth" %}
14082 
14083   ins_encode( aarch64_enc_java_to_runtime(meth) );
14084 
14085   ins_pipe(pipe_class_call);
14086 %}
14087 
14088 // Call Runtime Instruction
14089 
14090 instruct CallLeafDirect(method meth)
14091 %{
14092   match(CallLeaf);
14093 
14094   effect(USE meth);
14095 
14096   ins_cost(CALL_COST);
14097 
14098   format %{ "CALL, runtime leaf $meth" %}
14099 
14100   ins_encode( aarch64_enc_java_to_runtime(meth) );
14101 
14102   ins_pipe(pipe_class_call);
14103 %}
14104 
14105 // Call Runtime Instruction
14106 
14107 instruct CallLeafNoFPDirect(method meth)
14108 %{
14109   match(CallLeafNoFP);
14110 
14111   effect(USE meth);
14112 
14113   ins_cost(CALL_COST);
14114 
14115   format %{ "CALL, runtime leaf nofp $meth" %}
14116 
14117   ins_encode( aarch64_enc_java_to_runtime(meth) );
14118 
14119   ins_pipe(pipe_class_call);
14120 %}
14121 
14122 // Tail Call; Jump from runtime stub to Java code.
14123 // Also known as an 'interprocedural jump'.
14124 // Target of jump will eventually return to caller.
14125 // TailJump below removes the return address.
14126 instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
14127 %{
14128   match(TailCall jump_target method_oop);
14129 
14130   ins_cost(CALL_COST);
14131 
14132   format %{ "br $jump_target\t# $method_oop holds method oop" %}
14133 
14134   ins_encode(aarch64_enc_tail_call(jump_target));
14135 
14136   ins_pipe(pipe_class_call);
14137 %}
14138 
14139 instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
14140 %{
14141   match(TailJump jump_target ex_oop);
14142 
14143   ins_cost(CALL_COST);
14144 
14145   format %{ "br $jump_target\t# $ex_oop holds exception oop" %}
14146 
14147   ins_encode(aarch64_enc_tail_jmp(jump_target));
14148 
14149   ins_pipe(pipe_class_call);
14150 %}
14151 
14152 // Create exception oop: created by stack-crawling runtime code.
14153 // Created exception is now available to this handler, and is setup
14154 // just prior to jumping to this handler. No code emitted.
14155 // TODO check
14156 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
14157 instruct CreateException(iRegP_R0 ex_oop)
14158 %{
14159   match(Set ex_oop (CreateEx));
14160 
14161   format %{ " -- \t// exception oop; no code emitted" %}
14162 
14163   size(0);
14164 
14165   ins_encode( /*empty*/ );
14166 
14167   ins_pipe(pipe_class_empty);
14168 %}
14169 
14170 // Rethrow exception: The exception oop will come in the first
14171 // argument position. Then JUMP (not call) to the rethrow stub code.
14172 instruct RethrowException() %{
14173   match(Rethrow);
14174   ins_cost(CALL_COST);
14175 
14176   format %{ "b rethrow_stub" %}
14177 
14178   ins_encode( aarch64_enc_rethrow() );
14179 
14180   ins_pipe(pipe_class_call);
14181 %}
14182 
14183 
14184 // Return Instruction
14185 // epilog node loads ret address into lr as part of frame pop
14186 instruct Ret()
14187 %{
14188   match(Return);
14189 
14190   format %{ "ret\t// return register" %}
14191 
14192   ins_encode( aarch64_enc_ret() );
14193 
14194   ins_pipe(pipe_branch);
14195 %}
14196 
14197 // Die now.
14198 instruct ShouldNotReachHere() %{
14199   match(Halt);
14200 
14201   ins_cost(CALL_COST);
14202   format %{ "ShouldNotReachHere" %}
14203 
14204   ins_encode %{
14205     // TODO
14206     // implement proper trap call here
14207     __ brk(999);
14208   %}
14209 
14210   ins_pipe(pipe_class_default);
14211 %}
14212 
14213 // ============================================================================
14214 // Partial Subtype Check
14215 //
// Linearly scan the secondary-supers (superklass) array for an
// instance of the superklass.  Set a hidden internal cache on a hit
// (the cache is checked with exposed code in gen_subtype_check()).
// Return NZ for a miss or zero for a hit.  The encoding ALSO sets
// flags.
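//
// The second rule below additionally lets the matcher fold an
// explicit compare of the check result against zero into the check
// itself, since the encoding already leaves the flags set.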
14220 
14221 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
14222 %{
14223   match(Set result (PartialSubtypeCheck sub super));
14224   effect(KILL cr, KILL temp);
14225 
14226   ins_cost(1100);  // slightly larger than the next version
14227   format %{ "partialSubtypeCheck $result, $sub, $super" %}
14228 
14229   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14230 
14231   opcode(0x1); // Force zero of result reg on hit
14232 
14233   ins_pipe(pipe_class_memory);
14234 %}
14235 
14236 instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
14237 %{
14238   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
14239   effect(KILL temp, KILL result);
14240 
  ins_cost(1100);  // same cost as the version above
14242   format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}
14243 
14244   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14245 
14246   opcode(0x0); // Don't zero result reg on hit
14247 
14248   ins_pipe(pipe_class_memory);
14249 %}
14250 
14251 instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
14252                         iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
14253 %{
14254   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
14255   effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
14256 
14257   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
14258   ins_encode %{
14259     __ string_compare($str1$$Register, $str2$$Register,
14260                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
14261                       $tmp1$$Register);
14262   %}
14263   ins_pipe(pipe_class_memory);
14264 %}
14265 
14266 instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
14267        iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
14268 %{
14269   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
14270   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
14271          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
14272   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}
14273 
14274   ins_encode %{
14275     __ string_indexof($str1$$Register, $str2$$Register,
14276                       $cnt1$$Register, $cnt2$$Register,
14277                       $tmp1$$Register, $tmp2$$Register,
14278                       $tmp3$$Register, $tmp4$$Register,
14279                       -1, $result$$Register);
14280   %}
14281   ins_pipe(pipe_class_memory);
14282 %}
14283 
14284 instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
14285                  immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
14286                  iRegI tmp3, iRegI tmp4, rFlagsReg cr)
14287 %{
14288   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
14289   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
14290          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
14291   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}
14292 
14293   ins_encode %{
14294     int icnt2 = (int)$int_cnt2$$constant;
14295     __ string_indexof($str1$$Register, $str2$$Register,
14296                       $cnt1$$Register, zr,
14297                       $tmp1$$Register, $tmp2$$Register,
14298                       $tmp3$$Register, $tmp4$$Register,
14299                       icnt2, $result$$Register);
14300   %}
14301   ins_pipe(pipe_class_memory);
14302 %}
14303 
14304 instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
14305                         iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
14306 %{
14307   match(Set result (StrEquals (Binary str1 str2) cnt));
14308   effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
14309 
14310   format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
14311   ins_encode %{
14312     __ string_equals($str1$$Register, $str2$$Register,
14313                       $cnt$$Register, $result$$Register,
14314                       $tmp$$Register);
14315   %}
14316   ins_pipe(pipe_class_memory);
14317 %}
14318 
14319 instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
14320                       iRegP_R10 tmp, rFlagsReg cr)
14321 %{
14322   match(Set result (AryEq ary1 ary2));
14323   effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);
14324 
14325   format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
14326   ins_encode %{
14327     __ char_arrays_equals($ary1$$Register, $ary2$$Register,
14328                           $result$$Register, $tmp$$Register);
14329   %}
14330   ins_pipe(pipe_class_memory);
14331 %}
14332 
14333 // encode char[] to byte[] in ISO_8859_1
14334 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
14335                           vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
14336                           vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
14337                           iRegI_R0 result, rFlagsReg cr)
14338 %{
14339   match(Set result (EncodeISOArray src (Binary dst len)));
14340   effect(USE_KILL src, USE_KILL dst, USE_KILL len,
14341          KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);
14342 
14343   format %{ "Encode array $src,$dst,$len -> $result" %}
14344   ins_encode %{
14345     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
14346          $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
14347          $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
14348   %}
14349   ins_pipe( pipe_class_memory );
14350 %}
14351 
14352 // ============================================================================
14353 // This name is KNOWN by the ADLC and cannot be changed.
14354 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14355 // for this guy.
14356 instruct tlsLoadP(thread_RegP dst)
14357 %{
14358   match(Set dst (ThreadLocal));
14359 
14360   ins_cost(0);
14361 
14362   format %{ " -- \t// $dst=Thread::current(), empty" %}
14363 
14364   size(0);
14365 
14366   ins_encode( /*empty*/ );
14367 
14368   ins_pipe(pipe_class_empty);
14369 %}
14370 
14371 // ====================VECTOR INSTRUCTIONS=====================================
14372 
14373 // Load vector (32 bits)
14374 instruct loadV4(vecD dst, vmem mem)
14375 %{
14376   predicate(n->as_LoadVector()->memory_size() == 4);
14377   match(Set dst (LoadVector mem));
14378   ins_cost(4 * INSN_COST);
14379   format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
14380   ins_encode( aarch64_enc_ldrvS(dst, mem) );
14381   ins_pipe(pipe_class_memory);
14382 %}
14383 
14384 // Load vector (64 bits)
14385 instruct loadV8(vecD dst, vmem mem)
14386 %{
14387   predicate(n->as_LoadVector()->memory_size() == 8);
14388   match(Set dst (LoadVector mem));
14389   ins_cost(4 * INSN_COST);
14390   format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
14391   ins_encode( aarch64_enc_ldrvD(dst, mem) );
14392   ins_pipe(pipe_class_memory);
14393 %}
14394 
14395 // Load Vector (128 bits)
14396 instruct loadV16(vecX dst, vmem mem)
14397 %{
14398   predicate(n->as_LoadVector()->memory_size() == 16);
14399   match(Set dst (LoadVector mem));
14400   ins_cost(4 * INSN_COST);
14401   format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
14402   ins_encode( aarch64_enc_ldrvQ(dst, mem) );
14403   ins_pipe(pipe_class_memory);
14404 %}
14405 
14406 // Store Vector (32 bits)
14407 instruct storeV4(vecD src, vmem mem)
14408 %{
14409   predicate(n->as_StoreVector()->memory_size() == 4);
14410   match(Set mem (StoreVector mem src));
14411   ins_cost(4 * INSN_COST);
14412   format %{ "strs   $mem,$src\t# vector (32 bits)" %}
14413   ins_encode( aarch64_enc_strvS(src, mem) );
14414   ins_pipe(pipe_class_memory);
14415 %}
14416 
14417 // Store Vector (64 bits)
14418 instruct storeV8(vecD src, vmem mem)
14419 %{
14420   predicate(n->as_StoreVector()->memory_size() == 8);
14421   match(Set mem (StoreVector mem src));
14422   ins_cost(4 * INSN_COST);
14423   format %{ "strd   $mem,$src\t# vector (64 bits)" %}
14424   ins_encode( aarch64_enc_strvD(src, mem) );
14425   ins_pipe(pipe_class_memory);
14426 %}
14427 
14428 // Store Vector (128 bits)
14429 instruct storeV16(vecX src, vmem mem)
14430 %{
14431   predicate(n->as_StoreVector()->memory_size() == 16);
14432   match(Set mem (StoreVector mem src));
14433   ins_cost(4 * INSN_COST);
14434   format %{ "strq   $mem,$src\t# vector (128 bits)" %}
14435   ins_encode( aarch64_enc_strvQ(src, mem) );
14436   ins_pipe(pipe_class_memory);
14437 %}
14438 
14439 instruct replicate8B(vecD dst, iRegIorL2I src)
14440 %{
14441   predicate(n->as_Vector()->length() == 4 ||
14442             n->as_Vector()->length() == 8);
14443   match(Set dst (ReplicateB src));
14444   ins_cost(INSN_COST);
14445   format %{ "dup  $dst, $src\t# vector (8B)" %}
14446   ins_encode %{
14447     __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
14448   %}
14449   ins_pipe(pipe_class_default);
14450 %}
14451 
14452 instruct replicate16B(vecX dst, iRegIorL2I src)
14453 %{
14454   predicate(n->as_Vector()->length() == 16);
14455   match(Set dst (ReplicateB src));
14456   ins_cost(INSN_COST);
14457   format %{ "dup  $dst, $src\t# vector (16B)" %}
14458   ins_encode %{
14459     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
14460   %}
14461   ins_pipe(pipe_class_default);
14462 %}
14463 
14464 instruct replicate8B_imm(vecD dst, immI con)
14465 %{
14466   predicate(n->as_Vector()->length() == 4 ||
14467             n->as_Vector()->length() == 8);
14468   match(Set dst (ReplicateB con));
14469   ins_cost(INSN_COST);
14470   format %{ "movi  $dst, $con\t# vector(8B)" %}
14471   ins_encode %{
14472     __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
14473   %}
14474   ins_pipe(pipe_class_default);
14475 %}
14476 
14477 instruct replicate16B_imm(vecX dst, immI con)
14478 %{
14479   predicate(n->as_Vector()->length() == 16);
14480   match(Set dst (ReplicateB con));
14481   ins_cost(INSN_COST);
14482   format %{ "movi  $dst, $con\t# vector(16B)" %}
14483   ins_encode %{
14484     __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
14485   %}
14486   ins_pipe(pipe_class_default);
14487 %}
14488 
14489 instruct replicate4S(vecD dst, iRegIorL2I src)
14490 %{
14491   predicate(n->as_Vector()->length() == 2 ||
14492             n->as_Vector()->length() == 4);
14493   match(Set dst (ReplicateS src));
14494   ins_cost(INSN_COST);
14495   format %{ "dup  $dst, $src\t# vector (4S)" %}
14496   ins_encode %{
14497     __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
14498   %}
14499   ins_pipe(pipe_class_default);
14500 %}
14501 
14502 instruct replicate8S(vecX dst, iRegIorL2I src)
14503 %{
14504   predicate(n->as_Vector()->length() == 8);
14505   match(Set dst (ReplicateS src));
14506   ins_cost(INSN_COST);
14507   format %{ "dup  $dst, $src\t# vector (8S)" %}
14508   ins_encode %{
14509     __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
14510   %}
14511   ins_pipe(pipe_class_default);
14512 %}
14513 
14514 instruct replicate4S_imm(vecD dst, immI con)
14515 %{
14516   predicate(n->as_Vector()->length() == 2 ||
14517             n->as_Vector()->length() == 4);
14518   match(Set dst (ReplicateS con));
14519   ins_cost(INSN_COST);
14520   format %{ "movi  $dst, $con\t# vector(4H)" %}
14521   ins_encode %{
14522     __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
14523   %}
14524   ins_pipe(pipe_class_default);
14525 %}
14526 
14527 instruct replicate8S_imm(vecX dst, immI con)
14528 %{
14529   predicate(n->as_Vector()->length() == 8);
14530   match(Set dst (ReplicateS con));
14531   ins_cost(INSN_COST);
14532   format %{ "movi  $dst, $con\t# vector(8H)" %}
14533   ins_encode %{
14534     __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
14535   %}
14536   ins_pipe(pipe_class_default);
14537 %}
14538 
14539 instruct replicate2I(vecD dst, iRegIorL2I src)
14540 %{
14541   predicate(n->as_Vector()->length() == 2);
14542   match(Set dst (ReplicateI src));
14543   ins_cost(INSN_COST);
14544   format %{ "dup  $dst, $src\t# vector (2I)" %}
14545   ins_encode %{
14546     __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
14547   %}
14548   ins_pipe(pipe_class_default);
14549 %}
14550 
14551 instruct replicate4I(vecX dst, iRegIorL2I src)
14552 %{
14553   predicate(n->as_Vector()->length() == 4);
14554   match(Set dst (ReplicateI src));
14555   ins_cost(INSN_COST);
14556   format %{ "dup  $dst, $src\t# vector (4I)" %}
14557   ins_encode %{
14558     __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
14559   %}
14560   ins_pipe(pipe_class_default);
14561 %}
14562 
14563 instruct replicate2I_imm(vecD dst, immI con)
14564 %{
14565   predicate(n->as_Vector()->length() == 2);
14566   match(Set dst (ReplicateI con));
14567   ins_cost(INSN_COST);
14568   format %{ "movi  $dst, $con\t# vector(2I)" %}
14569   ins_encode %{
14570     __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
14571   %}
14572   ins_pipe(pipe_class_default);
14573 %}
14574 
14575 instruct replicate4I_imm(vecX dst, immI con)
14576 %{
14577   predicate(n->as_Vector()->length() == 4);
14578   match(Set dst (ReplicateI con));
14579   ins_cost(INSN_COST);
14580   format %{ "movi  $dst, $con\t# vector(4I)" %}
14581   ins_encode %{
14582     __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
14583   %}
14584   ins_pipe(pipe_class_default);
14585 %}
14586 
14587 instruct replicate2L(vecX dst, iRegL src)
14588 %{
14589   predicate(n->as_Vector()->length() == 2);
14590   match(Set dst (ReplicateL src));
14591   ins_cost(INSN_COST);
14592   format %{ "dup  $dst, $src\t# vector (2L)" %}
14593   ins_encode %{
14594     __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
14595   %}
14596   ins_pipe(pipe_class_default);
14597 %}
14598 
14599 instruct replicate2L_zero(vecX dst, immI0 zero)
14600 %{
14601   predicate(n->as_Vector()->length() == 2);
14602   match(Set dst (ReplicateI zero));
14603   ins_cost(INSN_COST);
14604   format %{ "movi  $dst, $zero\t# vector(4I)" %}
14605   ins_encode %{
14606     __ eor(as_FloatRegister($dst$$reg), __ T16B,
14607            as_FloatRegister($dst$$reg),
14608            as_FloatRegister($dst$$reg));
14609   %}
14610   ins_pipe(pipe_class_default);
14611 %}
14612 
14613 instruct replicate2F(vecD dst, vRegF src)
14614 %{
14615   predicate(n->as_Vector()->length() == 2);
14616   match(Set dst (ReplicateF src));
14617   ins_cost(INSN_COST);
14618   format %{ "dup  $dst, $src\t# vector (2F)" %}
14619   ins_encode %{
14620     __ dup(as_FloatRegister($dst$$reg), __ T2S,
14621            as_FloatRegister($src$$reg));
14622   %}
14623   ins_pipe(pipe_class_default);
14624 %}
14625 
14626 instruct replicate4F(vecX dst, vRegF src)
14627 %{
14628   predicate(n->as_Vector()->length() == 4);
14629   match(Set dst (ReplicateF src));
14630   ins_cost(INSN_COST);
14631   format %{ "dup  $dst, $src\t# vector (4F)" %}
14632   ins_encode %{
14633     __ dup(as_FloatRegister($dst$$reg), __ T4S,
14634            as_FloatRegister($src$$reg));
14635   %}
14636   ins_pipe(pipe_class_default);
14637 %}
14638 
14639 instruct replicate2D(vecX dst, vRegD src)
14640 %{
14641   predicate(n->as_Vector()->length() == 2);
14642   match(Set dst (ReplicateD src));
14643   ins_cost(INSN_COST);
14644   format %{ "dup  $dst, $src\t# vector (2D)" %}
14645   ins_encode %{
14646     __ dup(as_FloatRegister($dst$$reg), __ T2D,
14647            as_FloatRegister($src$$reg));
14648   %}
14649   ins_pipe(pipe_class_default);
14650 %}
14651 
14652 // ====================REDUCTION ARITHMETIC====================================
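
// Integer reductions combine the scalar input src1 with every lane
// of the vector src2 using general-purpose arithmetic: lanes are
// extracted with umov (after a cross-lane addv where one exists).  A
// sketch of the 2I add reduction (register names illustrative):
//
//   umov w8, v1.s[0]
//   umov w9, v1.s[1]
//   addw w0, w2, w8    // w2 holds the scalar input
//   addw w0, w0, w9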
14653 
14654 instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
14655 %{
14656   match(Set dst (AddReductionVI src1 src2));
14657   ins_cost(INSN_COST);
14658   effect(TEMP tmp, TEMP tmp2);
14659   format %{ "umov  $tmp, $src2, S, 0\n\t"
14660             "umov  $tmp2, $src2, S, 1\n\t"
14661             "addw  $dst, $src1, $tmp\n\t"
14662             "addw  $dst, $dst, $tmp2\t add reduction2i"
14663   %}
14664   ins_encode %{
14665     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
14666     __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
14667     __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
14668     __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
14669   %}
14670   ins_pipe(pipe_class_default);
14671 %}
14672 
14673 instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
14674 %{
14675   match(Set dst (AddReductionVI src1 src2));
14676   ins_cost(INSN_COST);
14677   effect(TEMP tmp, TEMP tmp2);
14678   format %{ "addv  $tmp, T4S, $src2\n\t"
14679             "umov  $tmp2, $tmp, S, 0\n\t"
14680             "addw  $dst, $tmp2, $src1\t add reduction4i"
14681   %}
14682   ins_encode %{
14683     __ addv(as_FloatRegister($tmp$$reg), __ T4S,
14684             as_FloatRegister($src2$$reg));
14685     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
14686     __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
14687   %}
14688   ins_pipe(pipe_class_default);
14689 %}
14690 
14691 instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
14692 %{
14693   match(Set dst (MulReductionVI src1 src2));
14694   ins_cost(INSN_COST);
14695   effect(TEMP tmp, TEMP dst);
14696   format %{ "umov  $tmp, $src2, S, 0\n\t"
14697             "mul   $dst, $tmp, $src1\n\t"
14698             "umov  $tmp, $src2, S, 1\n\t"
14699             "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
14700   %}
14701   ins_encode %{
14702     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
14703     __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
14704     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
14705     __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
14706   %}
14707   ins_pipe(pipe_class_default);
14708 %}
14709 
14710 instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
14711 %{
14712   match(Set dst (MulReductionVI src1 src2));
14713   ins_cost(INSN_COST);
14714   effect(TEMP tmp, TEMP tmp2, TEMP dst);
14715   format %{ "ins   $tmp, $src2, 0, 1\n\t"
14716             "mul   $tmp, $tmp, $src2\n\t"
14717             "umov  $tmp2, $tmp, S, 0\n\t"
14718             "mul   $dst, $tmp2, $src1\n\t"
14719             "umov  $tmp2, $tmp, S, 1\n\t"
14720             "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
14721   %}
14722   ins_encode %{
14723     __ ins(as_FloatRegister($tmp$$reg), __ D,
14724            as_FloatRegister($src2$$reg), 0, 1);
14725     __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
14726            as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
14727     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
14728     __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
14729     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
14730     __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
14731   %}
14732   ins_pipe(pipe_class_default);
14733 %}
14734 
14735 instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
14736 %{
14737   match(Set dst (AddReductionVF src1 src2));
14738   ins_cost(INSN_COST);
14739   effect(TEMP tmp, TEMP dst);
14740   format %{ "fadds $dst, $src1, $src2\n\t"
14741             "ins   $tmp, S, $src2, 0, 1\n\t"
14742             "fadds $dst, $dst, $tmp\t add reduction2f"
14743   %}
14744   ins_encode %{
14745     __ fadds(as_FloatRegister($dst$$reg),
14746              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14747     __ ins(as_FloatRegister($tmp$$reg), __ S,
14748            as_FloatRegister($src2$$reg), 0, 1);
14749     __ fadds(as_FloatRegister($dst$$reg),
14750              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14751   %}
14752   ins_pipe(pipe_class_default);
14753 %}
14754 
14755 instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
14756 %{
14757   match(Set dst (AddReductionVF src1 src2));
14758   ins_cost(INSN_COST);
14759   effect(TEMP tmp, TEMP dst);
14760   format %{ "fadds $dst, $src1, $src2\n\t"
14761             "ins   $tmp, S, $src2, 0, 1\n\t"
14762             "fadds $dst, $dst, $tmp\n\t"
14763             "ins   $tmp, S, $src2, 0, 2\n\t"
14764             "fadds $dst, $dst, $tmp\n\t"
14765             "ins   $tmp, S, $src2, 0, 3\n\t"
14766             "fadds $dst, $dst, $tmp\t add reduction4f"
14767   %}
14768   ins_encode %{
14769     __ fadds(as_FloatRegister($dst$$reg),
14770              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14771     __ ins(as_FloatRegister($tmp$$reg), __ S,
14772            as_FloatRegister($src2$$reg), 0, 1);
14773     __ fadds(as_FloatRegister($dst$$reg),
14774              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14775     __ ins(as_FloatRegister($tmp$$reg), __ S,
14776            as_FloatRegister($src2$$reg), 0, 2);
14777     __ fadds(as_FloatRegister($dst$$reg),
14778              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14779     __ ins(as_FloatRegister($tmp$$reg), __ S,
14780            as_FloatRegister($src2$$reg), 0, 3);
14781     __ fadds(as_FloatRegister($dst$$reg),
14782              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14783   %}
14784   ins_pipe(pipe_class_default);
14785 %}
14786 
14787 instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
14788 %{
14789   match(Set dst (MulReductionVF src1 src2));
14790   ins_cost(INSN_COST);
14791   effect(TEMP tmp, TEMP dst);
14792   format %{ "fmuls $dst, $src1, $src2\n\t"
14793             "ins   $tmp, S, $src2, 0, 1\n\t"
14794             "fmuls $dst, $dst, $tmp\t add reduction4f"
14795   %}
14796   ins_encode %{
14797     __ fmuls(as_FloatRegister($dst$$reg),
14798              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14799     __ ins(as_FloatRegister($tmp$$reg), __ S,
14800            as_FloatRegister($src2$$reg), 0, 1);
14801     __ fmuls(as_FloatRegister($dst$$reg),
14802              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14803   %}
14804   ins_pipe(pipe_class_default);
14805 %}
14806 
14807 instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
14808 %{
14809   match(Set dst (MulReductionVF src1 src2));
14810   ins_cost(INSN_COST);
14811   effect(TEMP tmp, TEMP dst);
14812   format %{ "fmuls $dst, $src1, $src2\n\t"
14813             "ins   $tmp, S, $src2, 0, 1\n\t"
14814             "fmuls $dst, $dst, $tmp\n\t"
14815             "ins   $tmp, S, $src2, 0, 2\n\t"
14816             "fmuls $dst, $dst, $tmp\n\t"
14817             "ins   $tmp, S, $src2, 0, 3\n\t"
14818             "fmuls $dst, $dst, $tmp\t add reduction4f"
14819   %}
14820   ins_encode %{
14821     __ fmuls(as_FloatRegister($dst$$reg),
14822              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14823     __ ins(as_FloatRegister($tmp$$reg), __ S,
14824            as_FloatRegister($src2$$reg), 0, 1);
14825     __ fmuls(as_FloatRegister($dst$$reg),
14826              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14827     __ ins(as_FloatRegister($tmp$$reg), __ S,
14828            as_FloatRegister($src2$$reg), 0, 2);
14829     __ fmuls(as_FloatRegister($dst$$reg),
14830              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14831     __ ins(as_FloatRegister($tmp$$reg), __ S,
14832            as_FloatRegister($src2$$reg), 0, 3);
14833     __ fmuls(as_FloatRegister($dst$$reg),
14834              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14835   %}
14836   ins_pipe(pipe_class_default);
14837 %}
14838 
14839 instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
14840 %{
14841   match(Set dst (AddReductionVD src1 src2));
14842   ins_cost(INSN_COST);
14843   effect(TEMP tmp, TEMP dst);
14844   format %{ "faddd $dst, $src1, $src2\n\t"
14845             "ins   $tmp, D, $src2, 0, 1\n\t"
14846             "faddd $dst, $dst, $tmp\t add reduction2d"
14847   %}
14848   ins_encode %{
14849     __ faddd(as_FloatRegister($dst$$reg),
14850              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14851     __ ins(as_FloatRegister($tmp$$reg), __ D,
14852            as_FloatRegister($src2$$reg), 0, 1);
14853     __ faddd(as_FloatRegister($dst$$reg),
14854              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14855   %}
14856   ins_pipe(pipe_class_default);
14857 %}
14858 
14859 instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
14860 %{
14861   match(Set dst (MulReductionVD src1 src2));
14862   ins_cost(INSN_COST);
14863   effect(TEMP tmp, TEMP dst);
14864   format %{ "fmuld $dst, $src1, $src2\n\t"
14865             "ins   $tmp, D, $src2, 0, 1\n\t"
14866             "fmuld $dst, $dst, $tmp\t add reduction2d"
14867   %}
14868   ins_encode %{
14869     __ fmuld(as_FloatRegister($dst$$reg),
14870              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14871     __ ins(as_FloatRegister($tmp$$reg), __ D,
14872            as_FloatRegister($src2$$reg), 0, 1);
14873     __ fmuld(as_FloatRegister($dst$$reg),
14874              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
14875   %}
14876   ins_pipe(pipe_class_default);
14877 %}
14878 
14879 // ====================VECTOR ARITHMETIC=======================================
14880 
14881 // --------------------------------- ADD --------------------------------------
14882 
14883 instruct vadd8B(vecD dst, vecD src1, vecD src2)
14884 %{
14885   predicate(n->as_Vector()->length() == 4 ||
14886             n->as_Vector()->length() == 8);
14887   match(Set dst (AddVB src1 src2));
14888   ins_cost(INSN_COST);
14889   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
14890   ins_encode %{
14891     __ addv(as_FloatRegister($dst$$reg), __ T8B,
14892             as_FloatRegister($src1$$reg),
14893             as_FloatRegister($src2$$reg));
14894   %}
14895   ins_pipe(pipe_class_default);
14896 %}
14897 
14898 instruct vadd16B(vecX dst, vecX src1, vecX src2)
14899 %{
14900   predicate(n->as_Vector()->length() == 16);
14901   match(Set dst (AddVB src1 src2));
14902   ins_cost(INSN_COST);
14903   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
14904   ins_encode %{
14905     __ addv(as_FloatRegister($dst$$reg), __ T16B,
14906             as_FloatRegister($src1$$reg),
14907             as_FloatRegister($src2$$reg));
14908   %}
14909   ins_pipe(pipe_class_default);
14910 %}
14911 
14912 instruct vadd4S(vecD dst, vecD src1, vecD src2)
14913 %{
14914   predicate(n->as_Vector()->length() == 2 ||
14915             n->as_Vector()->length() == 4);
14916   match(Set dst (AddVS src1 src2));
14917   ins_cost(INSN_COST);
14918   format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
14919   ins_encode %{
14920     __ addv(as_FloatRegister($dst$$reg), __ T4H,
14921             as_FloatRegister($src1$$reg),
14922             as_FloatRegister($src2$$reg));
14923   %}
14924   ins_pipe(pipe_class_default);
14925 %}
14926 
14927 instruct vadd8S(vecX dst, vecX src1, vecX src2)
14928 %{
14929   predicate(n->as_Vector()->length() == 8);
14930   match(Set dst (AddVS src1 src2));
14931   ins_cost(INSN_COST);
14932   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
14933   ins_encode %{
14934     __ addv(as_FloatRegister($dst$$reg), __ T8H,
14935             as_FloatRegister($src1$$reg),
14936             as_FloatRegister($src2$$reg));
14937   %}
14938   ins_pipe(pipe_class_default);
14939 %}
14940 
14941 instruct vadd2I(vecD dst, vecD src1, vecD src2)
14942 %{
14943   predicate(n->as_Vector()->length() == 2);
14944   match(Set dst (AddVI src1 src2));
14945   ins_cost(INSN_COST);
14946   format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
14947   ins_encode %{
14948     __ addv(as_FloatRegister($dst$$reg), __ T2S,
14949             as_FloatRegister($src1$$reg),
14950             as_FloatRegister($src2$$reg));
14951   %}
14952   ins_pipe(pipe_class_default);
14953 %}
14954 
14955 instruct vadd4I(vecX dst, vecX src1, vecX src2)
14956 %{
14957   predicate(n->as_Vector()->length() == 4);
14958   match(Set dst (AddVI src1 src2));
14959   ins_cost(INSN_COST);
14960   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
14961   ins_encode %{
14962     __ addv(as_FloatRegister($dst$$reg), __ T4S,
14963             as_FloatRegister($src1$$reg),
14964             as_FloatRegister($src2$$reg));
14965   %}
14966   ins_pipe(pipe_class_default);
14967 %}
14968 
14969 instruct vadd2L(vecX dst, vecX src1, vecX src2)
14970 %{
14971   predicate(n->as_Vector()->length() == 2);
14972   match(Set dst (AddVL src1 src2));
14973   ins_cost(INSN_COST);
14974   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
14975   ins_encode %{
14976     __ addv(as_FloatRegister($dst$$reg), __ T2D,
14977             as_FloatRegister($src1$$reg),
14978             as_FloatRegister($src2$$reg));
14979   %}
14980   ins_pipe(pipe_class_default);
14981 %}
14982 
14983 instruct vadd2F(vecD dst, vecD src1, vecD src2)
14984 %{
14985   predicate(n->as_Vector()->length() == 2);
14986   match(Set dst (AddVF src1 src2));
14987   ins_cost(INSN_COST);
14988   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
14989   ins_encode %{
14990     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
14991             as_FloatRegister($src1$$reg),
14992             as_FloatRegister($src2$$reg));
14993   %}
14994   ins_pipe(pipe_class_default);
14995 %}
14996 
14997 instruct vadd4F(vecX dst, vecX src1, vecX src2)
14998 %{
14999   predicate(n->as_Vector()->length() == 4);
15000   match(Set dst (AddVF src1 src2));
15001   ins_cost(INSN_COST);
15002   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
15003   ins_encode %{
15004     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
15005             as_FloatRegister($src1$$reg),
15006             as_FloatRegister($src2$$reg));
15007   %}
15008   ins_pipe(pipe_class_default);
15009 %}
15010 
15011 instruct vadd2D(vecX dst, vecX src1, vecX src2)
15012 %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
15014   ins_cost(INSN_COST);
15015   format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
15016   ins_encode %{
15017     __ fadd(as_FloatRegister($dst$$reg), __ T2D,
15018             as_FloatRegister($src1$$reg),
15019             as_FloatRegister($src2$$reg));
15020   %}
15021   ins_pipe(pipe_class_default);
15022 %}
15023 
15024 // --------------------------------- SUB --------------------------------------
15025 
15026 instruct vsub8B(vecD dst, vecD src1, vecD src2)
15027 %{
15028   predicate(n->as_Vector()->length() == 4 ||
15029             n->as_Vector()->length() == 8);
15030   match(Set dst (SubVB src1 src2));
15031   ins_cost(INSN_COST);
15032   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
15033   ins_encode %{
15034     __ subv(as_FloatRegister($dst$$reg), __ T8B,
15035             as_FloatRegister($src1$$reg),
15036             as_FloatRegister($src2$$reg));
15037   %}
15038   ins_pipe(pipe_class_default);
15039 %}
15040 
15041 instruct vsub16B(vecX dst, vecX src1, vecX src2)
15042 %{
15043   predicate(n->as_Vector()->length() == 16);
15044   match(Set dst (SubVB src1 src2));
15045   ins_cost(INSN_COST);
15046   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
15047   ins_encode %{
15048     __ subv(as_FloatRegister($dst$$reg), __ T16B,
15049             as_FloatRegister($src1$$reg),
15050             as_FloatRegister($src2$$reg));
15051   %}
15052   ins_pipe(pipe_class_default);
15053 %}
15054 
15055 instruct vsub4S(vecD dst, vecD src1, vecD src2)
15056 %{
15057   predicate(n->as_Vector()->length() == 2 ||
15058             n->as_Vector()->length() == 4);
15059   match(Set dst (SubVS src1 src2));
15060   ins_cost(INSN_COST);
15061   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
15062   ins_encode %{
15063     __ subv(as_FloatRegister($dst$$reg), __ T4H,
15064             as_FloatRegister($src1$$reg),
15065             as_FloatRegister($src2$$reg));
15066   %}
15067   ins_pipe(pipe_class_default);
15068 %}
15069 
15070 instruct vsub8S(vecX dst, vecX src1, vecX src2)
15071 %{
15072   predicate(n->as_Vector()->length() == 8);
15073   match(Set dst (SubVS src1 src2));
15074   ins_cost(INSN_COST);
15075   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
15076   ins_encode %{
15077     __ subv(as_FloatRegister($dst$$reg), __ T8H,
15078             as_FloatRegister($src1$$reg),
15079             as_FloatRegister($src2$$reg));
15080   %}
15081   ins_pipe(pipe_class_default);
15082 %}
15083 
15084 instruct vsub2I(vecD dst, vecD src1, vecD src2)
15085 %{
15086   predicate(n->as_Vector()->length() == 2);
15087   match(Set dst (SubVI src1 src2));
15088   ins_cost(INSN_COST);
15089   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
15090   ins_encode %{
15091     __ subv(as_FloatRegister($dst$$reg), __ T2S,
15092             as_FloatRegister($src1$$reg),
15093             as_FloatRegister($src2$$reg));
15094   %}
15095   ins_pipe(pipe_class_default);
15096 %}
15097 
15098 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15099 %{
15100   predicate(n->as_Vector()->length() == 4);
15101   match(Set dst (SubVI src1 src2));
15102   ins_cost(INSN_COST);
15103   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
15104   ins_encode %{
15105     __ subv(as_FloatRegister($dst$$reg), __ T4S,
15106             as_FloatRegister($src1$$reg),
15107             as_FloatRegister($src2$$reg));
15108   %}
15109   ins_pipe(pipe_class_default);
15110 %}
15111 
15112 instruct vsub2L(vecX dst, vecX src1, vecX src2)
15113 %{
15114   predicate(n->as_Vector()->length() == 2);
15115   match(Set dst (SubVL src1 src2));
15116   ins_cost(INSN_COST);
15117   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
15118   ins_encode %{
15119     __ subv(as_FloatRegister($dst$$reg), __ T2D,
15120             as_FloatRegister($src1$$reg),
15121             as_FloatRegister($src2$$reg));
15122   %}
15123   ins_pipe(pipe_class_default);
15124 %}
15125 
15126 instruct vsub2F(vecD dst, vecD src1, vecD src2)
15127 %{
15128   predicate(n->as_Vector()->length() == 2);
15129   match(Set dst (SubVF src1 src2));
15130   ins_cost(INSN_COST);
15131   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
15132   ins_encode %{
15133     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
15134             as_FloatRegister($src1$$reg),
15135             as_FloatRegister($src2$$reg));
15136   %}
15137   ins_pipe(pipe_class_default);
15138 %}
15139 
15140 instruct vsub4F(vecX dst, vecX src1, vecX src2)
15141 %{
15142   predicate(n->as_Vector()->length() == 4);
15143   match(Set dst (SubVF src1 src2));
15144   ins_cost(INSN_COST);
15145   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
15146   ins_encode %{
15147     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
15148             as_FloatRegister($src1$$reg),
15149             as_FloatRegister($src2$$reg));
15150   %}
15151   ins_pipe(pipe_class_default);
15152 %}
15153 
15154 instruct vsub2D(vecX dst, vecX src1, vecX src2)
15155 %{
15156   predicate(n->as_Vector()->length() == 2);
15157   match(Set dst (SubVD src1 src2));
15158   ins_cost(INSN_COST);
15159   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
15160   ins_encode %{
15161     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
15162             as_FloatRegister($src1$$reg),
15163             as_FloatRegister($src2$$reg));
15164   %}
15165   ins_pipe(pipe_class_default);
15166 %}
15167 
15168 // --------------------------------- MUL --------------------------------------
15169 
15170 instruct vmul4S(vecD dst, vecD src1, vecD src2)
15171 %{
15172   predicate(n->as_Vector()->length() == 2 ||
15173             n->as_Vector()->length() == 4);
15174   match(Set dst (MulVS src1 src2));
15175   ins_cost(INSN_COST);
15176   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
15177   ins_encode %{
15178     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
15179             as_FloatRegister($src1$$reg),
15180             as_FloatRegister($src2$$reg));
15181   %}
15182   ins_pipe(pipe_class_default);
15183 %}
15184 
15185 instruct vmul8S(vecX dst, vecX src1, vecX src2)
15186 %{
15187   predicate(n->as_Vector()->length() == 8);
15188   match(Set dst (MulVS src1 src2));
15189   ins_cost(INSN_COST);
15190   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
15191   ins_encode %{
15192     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
15193             as_FloatRegister($src1$$reg),
15194             as_FloatRegister($src2$$reg));
15195   %}
15196   ins_pipe(pipe_class_default);
15197 %}
15198 
15199 instruct vmul2I(vecD dst, vecD src1, vecD src2)
15200 %{
15201   predicate(n->as_Vector()->length() == 2);
15202   match(Set dst (MulVI src1 src2));
15203   ins_cost(INSN_COST);
15204   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
15205   ins_encode %{
15206     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
15207             as_FloatRegister($src1$$reg),
15208             as_FloatRegister($src2$$reg));
15209   %}
15210   ins_pipe(pipe_class_default);
15211 %}
15212 
15213 instruct vmul4I(vecX dst, vecX src1, vecX src2)
15214 %{
15215   predicate(n->as_Vector()->length() == 4);
15216   match(Set dst (MulVI src1 src2));
15217   ins_cost(INSN_COST);
15218   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
15219   ins_encode %{
15220     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
15221             as_FloatRegister($src1$$reg),
15222             as_FloatRegister($src2$$reg));
15223   %}
15224   ins_pipe(pipe_class_default);
15225 %}
15226 
15227 instruct vmul2F(vecD dst, vecD src1, vecD src2)
15228 %{
15229   predicate(n->as_Vector()->length() == 2);
15230   match(Set dst (MulVF src1 src2));
15231   ins_cost(INSN_COST);
15232   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
15233   ins_encode %{
15234     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
15235             as_FloatRegister($src1$$reg),
15236             as_FloatRegister($src2$$reg));
15237   %}
15238   ins_pipe(pipe_class_default);
15239 %}
15240 
15241 instruct vmul4F(vecX dst, vecX src1, vecX src2)
15242 %{
15243   predicate(n->as_Vector()->length() == 4);
15244   match(Set dst (MulVF src1 src2));
15245   ins_cost(INSN_COST);
15246   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
15247   ins_encode %{
15248     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
15249             as_FloatRegister($src1$$reg),
15250             as_FloatRegister($src2$$reg));
15251   %}
15252   ins_pipe(pipe_class_default);
15253 %}
15254 
15255 instruct vmul2D(vecX dst, vecX src1, vecX src2)
15256 %{
15257   predicate(n->as_Vector()->length() == 2);
15258   match(Set dst (MulVD src1 src2));
15259   ins_cost(INSN_COST);
15260   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
15261   ins_encode %{
15262     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
15263             as_FloatRegister($src1$$reg),
15264             as_FloatRegister($src2$$reg));
15265   %}
15266   ins_pipe(pipe_class_default);
15267 %}
15268 
15269 // --------------------------------- DIV --------------------------------------
15270 
15271 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
15272 %{
15273   predicate(n->as_Vector()->length() == 2);
15274   match(Set dst (DivVF src1 src2));
15275   ins_cost(INSN_COST);
15276   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
15277   ins_encode %{
15278     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
15279             as_FloatRegister($src1$$reg),
15280             as_FloatRegister($src2$$reg));
15281   %}
15282   ins_pipe(pipe_class_default);
15283 %}
15284 
15285 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
15286 %{
15287   predicate(n->as_Vector()->length() == 4);
15288   match(Set dst (DivVF src1 src2));
15289   ins_cost(INSN_COST);
15290   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
15291   ins_encode %{
15292     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
15293             as_FloatRegister($src1$$reg),
15294             as_FloatRegister($src2$$reg));
15295   %}
15296   ins_pipe(pipe_class_default);
15297 %}
15298 
15299 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
15300 %{
15301   predicate(n->as_Vector()->length() == 2);
15302   match(Set dst (DivVD src1 src2));
15303   ins_cost(INSN_COST);
15304   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
15305   ins_encode %{
15306     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
15307             as_FloatRegister($src1$$reg),
15308             as_FloatRegister($src2$$reg));
15309   %}
15310   ins_pipe(pipe_class_default);
15311 %}
15312 
15313 // --------------------------------- AND --------------------------------------
15314 
15315 instruct vand8B(vecD dst, vecD src1, vecD src2)
15316 %{
15317   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15318             n->as_Vector()->length_in_bytes() == 8);
15319   match(Set dst (AndV src1 src2));
15320   ins_cost(INSN_COST);
15321   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
15322   ins_encode %{
15323     __ andr(as_FloatRegister($dst$$reg), __ T8B,
15324             as_FloatRegister($src1$$reg),
15325             as_FloatRegister($src2$$reg));
15326   %}
15327   ins_pipe(pipe_class_default);
15328 %}
15329 
15330 instruct vand16B(vecX dst, vecX src1, vecX src2)
15331 %{
15332   predicate(n->as_Vector()->length_in_bytes() == 16);
15333   match(Set dst (AndV src1 src2));
15334   ins_cost(INSN_COST);
15335   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
15336   ins_encode %{
15337     __ andr(as_FloatRegister($dst$$reg), __ T16B,
15338             as_FloatRegister($src1$$reg),
15339             as_FloatRegister($src2$$reg));
15340   %}
15341   ins_pipe(pipe_class_default);
15342 %}
15343 
15344 // --------------------------------- OR ---------------------------------------
15345 
15346 instruct vor8B(vecD dst, vecD src1, vecD src2)
15347 %{
15348   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15349             n->as_Vector()->length_in_bytes() == 8);
15350   match(Set dst (OrV src1 src2));
15351   ins_cost(INSN_COST);
15352   format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
15353   ins_encode %{
15354     __ orr(as_FloatRegister($dst$$reg), __ T8B,
15355             as_FloatRegister($src1$$reg),
15356             as_FloatRegister($src2$$reg));
15357   %}
15358   ins_pipe(pipe_class_default);
15359 %}
15360 
15361 instruct vor16B(vecX dst, vecX src1, vecX src2)
15362 %{
15363   predicate(n->as_Vector()->length_in_bytes() == 16);
15364   match(Set dst (OrV src1 src2));
15365   ins_cost(INSN_COST);
15366   format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
15367   ins_encode %{
15368     __ orr(as_FloatRegister($dst$$reg), __ T16B,
15369             as_FloatRegister($src1$$reg),
15370             as_FloatRegister($src2$$reg));
15371   %}
15372   ins_pipe(pipe_class_default);
15373 %}
15374 
15375 // --------------------------------- XOR --------------------------------------
15376 
15377 instruct vxor8B(vecD dst, vecD src1, vecD src2)
15378 %{
15379   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15380             n->as_Vector()->length_in_bytes() == 8);
15381   match(Set dst (XorV src1 src2));
15382   ins_cost(INSN_COST);
15383   format %{ "eor  $dst,$src1,$src2\t# vector (8B)" %}
15384   ins_encode %{
15385     __ eor(as_FloatRegister($dst$$reg), __ T8B,
15386             as_FloatRegister($src1$$reg),
15387             as_FloatRegister($src2$$reg));
15388   %}
15389   ins_pipe(pipe_class_default);
15390 %}
15391 
15392 instruct vxor16B(vecX dst, vecX src1, vecX src2)
15393 %{
15394   predicate(n->as_Vector()->length_in_bytes() == 16);
15395   match(Set dst (XorV src1 src2));
15396   ins_cost(INSN_COST);
15397   format %{ "eor  $dst,$src1,$src2\t# vector (16B)" %}
15398   ins_encode %{
15399     __ eor(as_FloatRegister($dst$$reg), __ T16B,
15400             as_FloatRegister($src1$$reg),
15401             as_FloatRegister($src2$$reg));
15402   %}
15403   ins_pipe(pipe_class_default);
15404 %}
15405 
15406 // ------------------------------ Shift ---------------------------------------
15407 
15408 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
15409   match(Set dst (LShiftCntV cnt));
15410   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
15411   ins_encode %{
15412     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
15413   %}
15414   ins_pipe(pipe_class_default);
15415 %}
15416 
15417 // Right shifts on aarch64 SIMD are implemented as left shifts by a negative shift count
15418 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
15419   match(Set dst (RShiftCntV cnt));
15420   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t# T16B" %}
15421   ins_encode %{
15422     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
15423     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
15424   %}
15425   ins_pipe(pipe_class_default);
15426 %}
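
// A sketch of the trick above: SSHL and USHL read a per-lane shift count
// from the least significant byte of each lane of the second operand, and
// a negative count shifts right.  The dup broadcasts the scalar count to
// all 16 byte lanes (T16B), so every lane of every arrangement sees it.
// For byte lanes and a count of -3:
//   sshl: 0x80 -> 0xf0   (-128 >> 3  == -16, arithmetic)
//   ushl: 0x80 -> 0x10   ( 128 >>> 3 ==  16, logical)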
15427 
15428 instruct vsll8B(vecD dst, vecD src, vecX shift) %{
15429   predicate(n->as_Vector()->length() == 4 ||
15430             n->as_Vector()->length() == 8);
15431   match(Set dst (LShiftVB src shift));
15432   match(Set dst (RShiftVB src shift));
15433   ins_cost(INSN_COST);
15434   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
15435   ins_encode %{
15436     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
15437             as_FloatRegister($src$$reg),
15438             as_FloatRegister($shift$$reg));
15439   %}
15440   ins_pipe(pipe_class_default);
15441 %}
15442 
15443 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
15444   predicate(n->as_Vector()->length() == 16);
15445   match(Set dst (LShiftVB src shift));
15446   match(Set dst (RShiftVB src shift));
15447   ins_cost(INSN_COST);
15448   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
15449   ins_encode %{
15450     __ sshl(as_FloatRegister($dst$$reg), __ T16B,
15451             as_FloatRegister($src$$reg),
15452             as_FloatRegister($shift$$reg));
15453   %}
15454   ins_pipe(pipe_class_default);
15455 %}
15456 
15457 instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
15458   predicate(n->as_Vector()->length() == 4 ||
15459             n->as_Vector()->length() == 8);
15460   match(Set dst (URShiftVB src shift));
15461   ins_cost(INSN_COST);
15462   format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
15463   ins_encode %{
15464     __ ushl(as_FloatRegister($dst$$reg), __ T8B,
15465             as_FloatRegister($src$$reg),
15466             as_FloatRegister($shift$$reg));
15467   %}
15468   ins_pipe(pipe_class_default);
15469 %}
15470 
15471 instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
15472   predicate(n->as_Vector()->length() == 16);
15473   match(Set dst (URShiftVB src shift));
15474   ins_cost(INSN_COST);
15475   format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
15476   ins_encode %{
15477     __ ushl(as_FloatRegister($dst$$reg), __ T16B,
15478             as_FloatRegister($src$$reg),
15479             as_FloatRegister($shift$$reg));
15480   %}
15481   ins_pipe(pipe_class_default);
15482 %}
15483 
15484 instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
15485   predicate(n->as_Vector()->length() == 4 ||
15486             n->as_Vector()->length() == 8);
15487   match(Set dst (LShiftVB src shift));
15488   ins_cost(INSN_COST);
15489   format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
15490   ins_encode %{
15491     int sh = (int)$shift$$constant & 31;
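    // Java has already masked the int shift count to 0..31, so sh may
    // still be 8 or more; shifting a byte lane that far leaves all
    // zeroes, which eor(dst, src, src) produces directly.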
15492     if (sh >= 8) {
15493       __ eor(as_FloatRegister($dst$$reg), __ T8B,
15494              as_FloatRegister($src$$reg),
15495              as_FloatRegister($src$$reg));
15496     } else {
15497       __ shl(as_FloatRegister($dst$$reg), __ T8B,
15498              as_FloatRegister($src$$reg), sh);
15499     }
15500   %}
15501   ins_pipe(pipe_class_default);
15502 %}
15503 
15504 instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
15505   predicate(n->as_Vector()->length() == 16);
15506   match(Set dst (LShiftVB src shift));
15507   ins_cost(INSN_COST);
15508   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
15509   ins_encode %{
15510     int sh = (int)$shift$$constant & 31;
15511     if (sh >= 8) {
15512       __ eor(as_FloatRegister($dst$$reg), __ T16B,
15513              as_FloatRegister($src$$reg),
15514              as_FloatRegister($src$$reg));
15515     } else {
15516       __ shl(as_FloatRegister($dst$$reg), __ T16B,
15517              as_FloatRegister($src$$reg), sh);
15518     }
15519   %}
15520   ins_pipe(pipe_class_default);
15521 %}
15522 
15523 instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
15524   predicate(n->as_Vector()->length() == 4 ||
15525             n->as_Vector()->length() == 8);
15526   match(Set dst (RShiftVB src shift));
15527   ins_cost(INSN_COST);
15528   format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
15529   ins_encode %{
15530     int sh = (int)$shift$$constant & 31;
15531     if (sh >= 8) sh = 7;
15532     sh = -sh & 7;
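    // Arithmetic right shifts saturate at 7, leaving each lane as pure
    // sign fill.  Passing the negated count masked to the lane width
    // follows the immh:immb convention of the SIMD shift-immediate
    // encoding, from which the hardware derives the real shift amount.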
15533     __ sshr(as_FloatRegister($dst$$reg), __ T8B,
15534            as_FloatRegister($src$$reg), sh);
15535   %}
15536   ins_pipe(pipe_class_default);
15537 %}
15538 
15539 instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
15540   predicate(n->as_Vector()->length() == 16);
15541   match(Set dst (RShiftVB src shift));
15542   ins_cost(INSN_COST);
15543   format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
15544   ins_encode %{
15545     int sh = (int)$shift$$constant & 31;
15546     if (sh >= 8) sh = 7;
15547     sh = -sh & 7;
15548     __ sshr(as_FloatRegister($dst$$reg), __ T16B,
15549            as_FloatRegister($src$$reg), sh);
15550   %}
15551   ins_pipe(pipe_class_default);
15552 %}
15553 
15554 instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
15555   predicate(n->as_Vector()->length() == 4 ||
15556             n->as_Vector()->length() == 8);
15557   match(Set dst (URShiftVB src shift));
15558   ins_cost(INSN_COST);
15559   format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
15560   ins_encode %{
15561     int sh = (int)$shift$$constant & 31;
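    // A logical right shift of 8 or more clears every byte lane, so the
    // destination is simply zeroed with eor.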
15562     if (sh >= 8) {
15563       __ eor(as_FloatRegister($dst$$reg), __ T8B,
15564              as_FloatRegister($src$$reg),
15565              as_FloatRegister($src$$reg));
15566     } else {
15567       __ ushr(as_FloatRegister($dst$$reg), __ T8B,
15568              as_FloatRegister($src$$reg), -sh & 7);
15569     }
15570   %}
15571   ins_pipe(pipe_class_default);
15572 %}
15573 
15574 instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
15575   predicate(n->as_Vector()->length() == 16);
15576   match(Set dst (URShiftVB src shift));
15577   ins_cost(INSN_COST);
15578   format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
15579   ins_encode %{
15580     int sh = (int)$shift$$constant & 31;
15581     if (sh >= 8) {
15582       __ eor(as_FloatRegister($dst$$reg), __ T16B,
15583              as_FloatRegister($src$$reg),
15584              as_FloatRegister($src$$reg));
15585     } else {
15586       __ ushr(as_FloatRegister($dst$$reg), __ T16B,
15587              as_FloatRegister($src$$reg), -sh & 7);
15588     }
15589   %}
15590   ins_pipe(pipe_class_default);
15591 %}
15592 
15593 instruct vsll4S(vecD dst, vecD src, vecX shift) %{
15594   predicate(n->as_Vector()->length() == 2 ||
15595             n->as_Vector()->length() == 4);
15596   match(Set dst (LShiftVS src shift));
15597   match(Set dst (RShiftVS src shift));
15598   ins_cost(INSN_COST);
15599   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
15600   ins_encode %{
15601     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
15602             as_FloatRegister($src$$reg),
15603             as_FloatRegister($shift$$reg));
15604   %}
15605   ins_pipe(pipe_class_default);
15606 %}
15607 
15608 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
15609   predicate(n->as_Vector()->length() == 8);
15610   match(Set dst (LShiftVS src shift));
15611   match(Set dst (RShiftVS src shift));
15612   ins_cost(INSN_COST);
15613   format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
15614   ins_encode %{
15615     __ sshl(as_FloatRegister($dst$$reg), __ T8H,
15616             as_FloatRegister($src$$reg),
15617             as_FloatRegister($shift$$reg));
15618   %}
15619   ins_pipe(pipe_class_default);
15620 %}
15621 
15622 instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
15623   predicate(n->as_Vector()->length() == 2 ||
15624             n->as_Vector()->length() == 4);
15625   match(Set dst (URShiftVS src shift));
15626   ins_cost(INSN_COST);
15627   format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
15628   ins_encode %{
15629     __ ushl(as_FloatRegister($dst$$reg), __ T4H,
15630             as_FloatRegister($src$$reg),
15631             as_FloatRegister($shift$$reg));
15632   %}
15633   ins_pipe(pipe_class_default);
15634 %}
15635 
15636 instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
15637   predicate(n->as_Vector()->length() == 8);
15638   match(Set dst (URShiftVS src shift));
15639   ins_cost(INSN_COST);
15640   format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
15641   ins_encode %{
15642     __ ushl(as_FloatRegister($dst$$reg), __ T8H,
15643             as_FloatRegister($src$$reg),
15644             as_FloatRegister($shift$$reg));
15645   %}
15646   ins_pipe(pipe_class_default);
15647 %}
15648 
15649 instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
15650   predicate(n->as_Vector()->length() == 2 ||
15651             n->as_Vector()->length() == 4);
15652   match(Set dst (LShiftVS src shift));
15653   ins_cost(INSN_COST);
15654   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
15655   ins_encode %{
15656     int sh = (int)$shift$$constant & 31;
15657     if (sh >= 16) {
15658       __ eor(as_FloatRegister($dst$$reg), __ T8B,
15659              as_FloatRegister($src$$reg),
15660              as_FloatRegister($src$$reg));
15661     } else {
15662       __ shl(as_FloatRegister($dst$$reg), __ T4H,
15663              as_FloatRegister($src$$reg), sh);
15664     }
15665   %}
15666   ins_pipe(pipe_class_default);
15667 %}
15668 
15669 instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
15670   predicate(n->as_Vector()->length() == 8);
15671   match(Set dst (LShiftVS src shift));
15672   ins_cost(INSN_COST);
15673   format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
15674   ins_encode %{
15675     int sh = (int)$shift$$constant & 31;
15676     if (sh >= 16) {
15677       __ eor(as_FloatRegister($dst$$reg), __ T16B,
15678              as_FloatRegister($src$$reg),
15679              as_FloatRegister($src$$reg));
15680     } else {
15681       __ shl(as_FloatRegister($dst$$reg), __ T8H,
15682              as_FloatRegister($src$$reg), sh);
15683     }
15684   %}
15685   ins_pipe(pipe_class_default);
15686 %}
15687 
15688 instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
15689   predicate(n->as_Vector()->length() == 2 ||
15690             n->as_Vector()->length() == 4);
15691   match(Set dst (RShiftVS src shift));
15692   ins_cost(INSN_COST);
15693   format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
15694   ins_encode %{
15695     int sh = (int)$shift$$constant & 31;
15696     if (sh >= 16) sh = 15;
15697     sh = -sh & 15;
15698     __ sshr(as_FloatRegister($dst$$reg), __ T4H,
15699            as_FloatRegister($src$$reg), sh);
15700   %}
15701   ins_pipe(pipe_class_default);
15702 %}
15703 
15704 instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
15705   predicate(n->as_Vector()->length() == 8);
15706   match(Set dst (RShiftVS src shift));
15707   ins_cost(INSN_COST);
15708   format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
15709   ins_encode %{
15710     int sh = (int)$shift$$constant & 31;
15711     if (sh >= 16) sh = 15;
15712     sh = -sh & 15;
15713     __ sshr(as_FloatRegister($dst$$reg), __ T8H,
15714            as_FloatRegister($src$$reg), sh);
15715   %}
15716   ins_pipe(pipe_class_default);
15717 %}
15718 
15719 instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
15720   predicate(n->as_Vector()->length() == 2 ||
15721             n->as_Vector()->length() == 4);
15722   match(Set dst (URShiftVS src shift));
15723   ins_cost(INSN_COST);
15724   format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
15725   ins_encode %{
15726     int sh = (int)$shift$$constant & 31;
15727     if (sh >= 16) {
15728       __ eor(as_FloatRegister($dst$$reg), __ T8B,
15729              as_FloatRegister($src$$reg),
15730              as_FloatRegister($src$$reg));
15731     } else {
15732       __ ushr(as_FloatRegister($dst$$reg), __ T4H,
15733              as_FloatRegister($src$$reg), -sh & 15);
15734     }
15735   %}
15736   ins_pipe(pipe_class_default);
15737 %}
15738 
15739 instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
15740   predicate(n->as_Vector()->length() == 8);
15741   match(Set dst (URShiftVS src shift));
15742   ins_cost(INSN_COST);
15743   format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
15744   ins_encode %{
15745     int sh = (int)$shift$$constant & 31;
15746     if (sh >= 16) {
15747       __ eor(as_FloatRegister($dst$$reg), __ T16B,
15748              as_FloatRegister($src$$reg),
15749              as_FloatRegister($src$$reg));
15750     } else {
15751       __ ushr(as_FloatRegister($dst$$reg), __ T8H,
15752              as_FloatRegister($src$$reg), -sh & 15);
15753     }
15754   %}
15755   ins_pipe(pipe_class_default);
15756 %}
15757 
15758 instruct vsll2I(vecD dst, vecD src, vecX shift) %{
15759   predicate(n->as_Vector()->length() == 2);
15760   match(Set dst (LShiftVI src shift));
15761   match(Set dst (RShiftVI src shift));
15762   ins_cost(INSN_COST);
15763   format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
15764   ins_encode %{
15765     __ sshl(as_FloatRegister($dst$$reg), __ T2S,
15766             as_FloatRegister($src$$reg),
15767             as_FloatRegister($shift$$reg));
15768   %}
15769   ins_pipe(pipe_class_default);
15770 %}
15771 
15772 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
15773   predicate(n->as_Vector()->length() == 4);
15774   match(Set dst (LShiftVI src shift));
15775   match(Set dst (RShiftVI src shift));
15776   ins_cost(INSN_COST);
15777   format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
15778   ins_encode %{
15779     __ sshl(as_FloatRegister($dst$$reg), __ T4S,
15780             as_FloatRegister($src$$reg),
15781             as_FloatRegister($shift$$reg));
15782   %}
15783   ins_pipe(pipe_class_default);
15784 %}
15785 
15786 instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
15787   predicate(n->as_Vector()->length() == 2);
15788   match(Set dst (URShiftVI src shift));
15789   ins_cost(INSN_COST);
15790   format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
15791   ins_encode %{
15792     __ ushl(as_FloatRegister($dst$$reg), __ T2S,
15793             as_FloatRegister($src$$reg),
15794             as_FloatRegister($shift$$reg));
15795   %}
15796   ins_pipe(pipe_class_default);
15797 %}
15798 
15799 instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
15800   predicate(n->as_Vector()->length() == 4);
15801   match(Set dst (URShiftVI src shift));
15802   ins_cost(INSN_COST);
15803   format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
15804   ins_encode %{
15805     __ ushl(as_FloatRegister($dst$$reg), __ T4S,
15806             as_FloatRegister($src$$reg),
15807             as_FloatRegister($shift$$reg));
15808   %}
15809   ins_pipe(pipe_class_default);
15810 %}
15811 
15812 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
15813   predicate(n->as_Vector()->length() == 2);
15814   match(Set dst (LShiftVI src shift));
15815   ins_cost(INSN_COST);
15816   format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
15817   ins_encode %{
15818     __ shl(as_FloatRegister($dst$$reg), __ T2S,
15819            as_FloatRegister($src$$reg),
15820            (int)$shift$$constant & 31);
15821   %}
15822   ins_pipe(pipe_class_default);
15823 %}
15824 
15825 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
15826   predicate(n->as_Vector()->length() == 4);
15827   match(Set dst (LShiftVI src shift));
15828   ins_cost(INSN_COST);
15829   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
15830   ins_encode %{
15831     __ shl(as_FloatRegister($dst$$reg), __ T4S,
15832            as_FloatRegister($src$$reg),
15833            (int)$shift$$constant & 31);
15834   %}
15835   ins_pipe(pipe_class_default);
15836 %}
15837 
15838 instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
15839   predicate(n->as_Vector()->length() == 2);
15840   match(Set dst (RShiftVI src shift));
15841   ins_cost(INSN_COST);
15842   format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
15843   ins_encode %{
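    // No clamping is needed here: Java masks shift counts to 0..31 for
    // ints (and 0..63 for longs below), which is exactly the lane range.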
15844     __ sshr(as_FloatRegister($dst$$reg), __ T2S,
15845             as_FloatRegister($src$$reg),
15846             -(int)$shift$$constant & 31);
15847   %}
15848   ins_pipe(pipe_class_default);
15849 %}
15850 
15851 instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
15852   predicate(n->as_Vector()->length() == 4);
15853   match(Set dst (RShiftVI src shift));
15854   ins_cost(INSN_COST);
15855   format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
15856   ins_encode %{
15857     __ sshr(as_FloatRegister($dst$$reg), __ T4S,
15858             as_FloatRegister($src$$reg),
15859             -(int)$shift$$constant & 31);
15860   %}
15861   ins_pipe(pipe_class_default);
15862 %}
15863 
15864 instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
15865   predicate(n->as_Vector()->length() == 2);
15866   match(Set dst (URShiftVI src shift));
15867   ins_cost(INSN_COST);
15868   format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
15869   ins_encode %{
15870     __ ushr(as_FloatRegister($dst$$reg), __ T2S,
15871             as_FloatRegister($src$$reg),
15872             -(int)$shift$$constant & 31);
15873   %}
15874   ins_pipe(pipe_class_default);
15875 %}
15876 
15877 instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
15878   predicate(n->as_Vector()->length() == 4);
15879   match(Set dst (URShiftVI src shift));
15880   ins_cost(INSN_COST);
15881   format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
15882   ins_encode %{
15883     __ ushr(as_FloatRegister($dst$$reg), __ T4S,
15884             as_FloatRegister($src$$reg),
15885             -(int)$shift$$constant & 31);
15886   %}
15887   ins_pipe(pipe_class_default);
15888 %}
15889 
15890 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
15891   predicate(n->as_Vector()->length() == 2);
15892   match(Set dst (LShiftVL src shift));
15893   match(Set dst (RShiftVL src shift));
15894   ins_cost(INSN_COST);
15895   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
15896   ins_encode %{
15897     __ sshl(as_FloatRegister($dst$$reg), __ T2D,
15898             as_FloatRegister($src$$reg),
15899             as_FloatRegister($shift$$reg));
15900   %}
15901   ins_pipe(pipe_class_default);
15902 %}
15903 
15904 instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
15905   predicate(n->as_Vector()->length() == 2);
15906   match(Set dst (URShiftVL src shift));
15907   ins_cost(INSN_COST);
15908   format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
15909   ins_encode %{
15910     __ ushl(as_FloatRegister($dst$$reg), __ T2D,
15911             as_FloatRegister($src$$reg),
15912             as_FloatRegister($shift$$reg));
15913   %}
15914   ins_pipe(pipe_class_default);
15915 %}
15916 
15917 instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
15918   predicate(n->as_Vector()->length() == 2);
15919   match(Set dst (LShiftVL src shift));
15920   ins_cost(INSN_COST);
15921   format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
15922   ins_encode %{
15923     __ shl(as_FloatRegister($dst$$reg), __ T2D,
15924            as_FloatRegister($src$$reg),
15925            (int)$shift$$constant & 63);
15926   %}
15927   ins_pipe(pipe_class_default);
15928 %}
15929 
15930 instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
15931   predicate(n->as_Vector()->length() == 2);
15932   match(Set dst (RShiftVL src shift));
15933   ins_cost(INSN_COST);
15934   format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
15935   ins_encode %{
15936     __ sshr(as_FloatRegister($dst$$reg), __ T2D,
15937             as_FloatRegister($src$$reg),
15938             -(int)$shift$$constant & 63);
15939   %}
15940   ins_pipe(pipe_class_default);
15941 %}
15942 
15943 instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
15944   predicate(n->as_Vector()->length() == 2);
15945   match(Set dst (URShiftVL src shift));
15946   ins_cost(INSN_COST);
15947   format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
15948   ins_encode %{
15949     __ ushr(as_FloatRegister($dst$$reg), __ T2D,
15950             as_FloatRegister($src$$reg),
15951             -(int)$shift$$constant & 63);
15952   %}
15953   ins_pipe(pipe_class_default);
15954 %}
15955 
15956 //----------PEEPHOLE RULES-----------------------------------------------------
15957 // These must follow all instruction definitions as they use the names
15958 // defined in the instruction definitions.
15959 //
15960 // peepmatch ( root_instr_name [preceding_instruction]* );
15961 //
15962 // peepconstraint ( instruction_number.operand_name relational_op
15963 //                  instruction_number.operand_name
15964 //                  [, ...] );
15965 // // instruction numbers are zero-based using left to right order in peepmatch
15966 //
15967 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
15968 // // provide an instruction_number.operand_name for each operand that appears
15969 // // in the replacement instruction's match rule
15970 //
15971 // ---------VM FLAGS---------------------------------------------------------
15972 //
15973 // All peephole optimizations can be turned off using -XX:-OptoPeephole
15974 //
15975 // Each peephole rule is given an identifying number starting with zero and
15976 // increasing by one in the order seen by the parser.  An individual peephole
15977 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
15978 // on the command-line.
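//
// For example, in a debug build (both are develop flags; the class name
// below is just a placeholder):
//
//   java -XX:-OptoPeephole <Class>        (all peepholes off)
//   java -XX:OptoPeepholeAt=0 <Class>     (only peephole rule 0 enabled)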
15979 //
15980 // ---------CURRENT LIMITATIONS----------------------------------------------
15981 //
15982 // Only match adjacent instructions in same basic block
15983 // Only equality constraints
15984 // Only constraints between operands, not (0.dest_reg == RAX_enc)
15985 // Only one replacement instruction
15986 //
15987 // ---------EXAMPLE----------------------------------------------------------
15988 //
15989 // // pertinent parts of existing instructions in architecture description
15990 // instruct movI(iRegINoSp dst, iRegI src)
15991 // %{
15992 //   match(Set dst (CopyI src));
15993 // %}
15994 //
15995 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
15996 // %{
15997 //   match(Set dst (AddI dst src));
15998 //   effect(KILL cr);
15999 // %}
16000 //
16001 // // Change (inc mov) to lea
16002 // peephole %{
16003 //   // increment preceded by register-register move
16004 //   peepmatch ( incI_iReg movI );
16005 //   // require that the destination register of the increment
16006 //   // match the destination register of the move
16007 //   peepconstraint ( 0.dst == 1.dst );
16008 //   // construct a replacement instruction that sets
16009 //   // the destination to ( move's source register + one )
16010 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16011 // %}
16012 //
16013 
16014 // The implementation no longer uses movX instructions, since the
16015 // machine-independent system no longer uses CopyX nodes.
16016 //
16017 // peephole
16018 // %{
16019 //   peepmatch (incI_iReg movI);
16020 //   peepconstraint (0.dst == 1.dst);
16021 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16022 // %}
16023 
16024 // peephole
16025 // %{
16026 //   peepmatch (decI_iReg movI);
16027 //   peepconstraint (0.dst == 1.dst);
16028 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16029 // %}
16030 
16031 // peephole
16032 // %{
16033 //   peepmatch (addI_iReg_imm movI);
16034 //   peepconstraint (0.dst == 1.dst);
16035 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16036 // %}
16037 
16038 // peephole
16039 // %{
16040 //   peepmatch (incL_iReg movL);
16041 //   peepconstraint (0.dst == 1.dst);
16042 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16043 // %}
16044 
16045 // peephole
16046 // %{
16047 //   peepmatch (decL_iReg movL);
16048 //   peepconstraint (0.dst == 1.dst);
16049 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16050 // %}
16051 
16052 // peephole
16053 // %{
16054 //   peepmatch (addL_iReg_imm movL);
16055 //   peepconstraint (0.dst == 1.dst);
16056 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16057 // %}
16058 
16059 // peephole
16060 // %{
16061 //   peepmatch (addP_iReg_imm movP);
16062 //   peepconstraint (0.dst == 1.dst);
16063 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16064 // %}
16065 
16066 // // Change load of spilled value to only a spill
16067 // instruct storeI(memory mem, iRegI src)
16068 // %{
16069 //   match(Set mem (StoreI mem src));
16070 // %}
16071 //
16072 // instruct loadI(iRegINoSp dst, memory mem)
16073 // %{
16074 //   match(Set dst (LoadI mem));
16075 // %}
16076 //
16077 
16078 //----------SMARTSPILL RULES---------------------------------------------------
16079 // These must follow all instruction definitions as they use the names
16080 // defined in the instruction definitions.
16081 
16082 // Local Variables:
16083 // mode: c++
16084 // End: