1 //
   2 // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
  31 // architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage, we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
 160 // For Java use, float registers v0-v15 are always save-on-call (whereas
 161 // the platform ABI treats v8-v15 as callee save). Float registers
 162 // v16-v31 are SOC as per the platform spec.
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
 328 // The AArch64 CPSR status flag register is not directly accessible as an
 329 // instruction operand. The FPSR status flag register is a system
 330 // register which can be written/read using MSR/MRS but again does not
 331 // appear as an operand (a code identifying the FPSR occurs as an
 332 // immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
 432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 434 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 435 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580  /* R29, */                     // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649  /* R29, R29_H, */              // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
//
// A 64-bit vector occupies two 32-bit allocator slots (Vn, Vn_H), so
// this class has the same contents as double_reg above.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// A 128-bit vector occupies four 32-bit allocator slots
// (Vn, Vn_H, Vn_J, Vn_K).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only two 32-bit slots (V0, V0_H) are listed here,
// unlike vectorx_reg which uses four per register for 128 bits --
// confirm this matches how operands of this class are used.
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): only two 32-bit slots (V1, V1_H) are listed here,
// unlike vectorx_reg which uses four per register for 128 bits --
// confirm this matches how operands of this class are used.
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): only two 32-bit slots (V2, V2_H) are listed here,
// unlike vectorx_reg which uses four per register for 128 bits --
// confirm this matches how operands of this class are used.
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): only two 32-bit slots (V3, V3_H) are listed here,
// unlike vectorx_reg which uses four per register for 128 bits --
// confirm this matches how operands of this class are used.
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the AArch64 NZCV flags,
// represented by the single RFLAGS register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls rank as twice the cost of a plain instruction.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are by far the most expensive operations here,
  // reflecting the barriers/acquire-release instructions they require.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 #include "opto/addnode.hpp"
1000 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // Always zero: this platform emits no call trampolines, so
  // shorten_branches need not reserve space for them.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  // (zero for the same reason as above)
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1018 
class HandlerImpl {

 public:

  // emitters for the exception and deopt handler stubs; definitions
  // live in the source block of this file
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // reserved size of the exception handler: one far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): four instruction slots are reserved; confirm this
    // still matches the adr + far-branch sequence actually emitted.
    return 4 * NativeInstruction::instruction_size;
  }
};
1035 
  // graph traversal helpers
  // (definitions and detailed commentary are in the source block below)

  // navigate from a node to its parent/child membar via the
  // intervening Ctl/Mem projections
  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  // classification predicates for individual membar/CAS nodes
  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // navigate between the paired membars bracketing a volatile
  // put/get or CAS subgraph
  MemBarNode *leading_to_trailing(MemBarNode *leading);
  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1068 %}
1069 
1070 source %{
1071 
  // Optimization of volatile gets and puts
1073   // -------------------------------------
1074   //
1075   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1076   // use to implement volatile reads and writes. For a volatile read
1077   // we simply need
1078   //
1079   //   ldar<x>
1080   //
1081   // and for a volatile write we need
1082   //
1083   //   stlr<x>
1084   //
1085   // Alternatively, we can implement them by pairing a normal
1086   // load/store with a memory barrier. For a volatile read we need
1087   //
1088   //   ldr<x>
1089   //   dmb ishld
1090   //
1091   // for a volatile write
1092   //
1093   //   dmb ish
1094   //   str<x>
1095   //   dmb ish
1096   //
1097   // We can also use ldaxr and stlxr to implement compare and swap CAS
1098   // sequences. These are normally translated to an instruction
1099   // sequence like the following
1100   //
1101   //   dmb      ish
1102   // retry:
1103   //   ldxr<x>   rval raddr
1104   //   cmp       rval rold
1105   //   b.ne done
1106   //   stlxr<x>  rval, rnew, rold
1107   //   cbnz      rval retry
1108   // done:
1109   //   cset      r0, eq
1110   //   dmb ishld
1111   //
1112   // Note that the exclusive store is already using an stlxr
1113   // instruction. That is required to ensure visibility to other
1114   // threads of the exclusive write (assuming it succeeds) before that
1115   // of any subsequent writes.
1116   //
1117   // The following instruction sequence is an improvement on the above
1118   //
1119   // retry:
1120   //   ldaxr<x>  rval raddr
1121   //   cmp       rval rold
1122   //   b.ne done
1123   //   stlxr<x>  rval, rnew, rold
1124   //   cbnz      rval retry
1125   // done:
1126   //   cset      r0, eq
1127   //
1128   // We don't need the leading dmb ish since the stlxr guarantees
1129   // visibility of prior writes in the case that the swap is
1130   // successful. Crucially we don't have to worry about the case where
1131   // the swap is not successful since no valid program should be
1132   // relying on visibility of prior changes by the attempting thread
1133   // in the case where the CAS fails.
1134   //
1135   // Similarly, we don't need the trailing dmb ishld if we substitute
1136   // an ldaxr instruction since that will provide all the guarantees we
1137   // require regarding observation of changes made by other threads
1138   // before any change to the CAS address observed by the load.
1139   //
1140   // In order to generate the desired instruction sequence we need to
1141   // be able to identify specific 'signature' ideal graph node
1142   // sequences which i) occur as a translation of a volatile reads or
1143   // writes or CAS operations and ii) do not occur through any other
1144   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1146   // sequences to the desired machine code sequences. Selection of the
1147   // alternative rules can be implemented by predicates which identify
1148   // the relevant node sequences.
1149   //
1150   // The ideal graph generator translates a volatile read to the node
1151   // sequence
1152   //
1153   //   LoadX[mo_acquire]
1154   //   MemBarAcquire
1155   //
1156   // As a special case when using the compressed oops optimization we
1157   // may also see this variant
1158   //
1159   //   LoadN[mo_acquire]
1160   //   DecodeN
1161   //   MemBarAcquire
1162   //
1163   // A volatile write is translated to the node sequence
1164   //
1165   //   MemBarRelease
1166   //   StoreX[mo_release] {CardMark}-optional
1167   //   MemBarVolatile
1168   //
1169   // n.b. the above node patterns are generated with a strict
1170   // 'signature' configuration of input and output dependencies (see
1171   // the predicates below for exact details). The card mark may be as
1172   // simple as a few extra nodes or, in a few GC configurations, may
1173   // include more complex control flow between the leading and
1174   // trailing memory barriers. However, whatever the card mark
1175   // configuration these signatures are unique to translated volatile
1176   // reads/stores -- they will not appear as a result of any other
1177   // bytecode translation or inlining nor as a consequence of
1178   // optimizing transforms.
1179   //
1180   // We also want to catch inlined unsafe volatile gets and puts and
1181   // be able to implement them using either ldar<x>/stlr<x> or some
1182   // combination of ldr<x>/stlr<x> and dmb instructions.
1183   //
1184   // Inlined unsafe volatiles puts manifest as a minor variant of the
1185   // normal volatile put node sequence containing an extra cpuorder
1186   // membar
1187   //
1188   //   MemBarRelease
1189   //   MemBarCPUOrder
1190   //   StoreX[mo_release] {CardMark}-optional
1191   //   MemBarVolatile
1192   //
1193   // n.b. as an aside, the cpuorder membar is not itself subject to
1194   // matching and translation by adlc rules.  However, the rule
1195   // predicates need to detect its presence in order to correctly
1196   // select the desired adlc rules.
1197   //
1198   // Inlined unsafe volatile gets manifest as a somewhat different
1199   // node sequence to a normal volatile get
1200   //
1201   //   MemBarCPUOrder
1202   //        ||       \\
1203   //   MemBarAcquire LoadX[mo_acquire]
1204   //        ||
1205   //   MemBarCPUOrder
1206   //
1207   // In this case the acquire membar does not directly depend on the
1208   // load. However, we can be sure that the load is generated from an
1209   // inlined unsafe volatile get if we see it dependent on this unique
1210   // sequence of membar nodes. Similarly, given an acquire membar we
1211   // can know that it was added because of an inlined unsafe volatile
1212   // get if it is fed and feeds a cpuorder membar and if its feed
1213   // membar also feeds an acquiring load.
1214   //
1215   // Finally an inlined (Unsafe) CAS operation is translated to the
1216   // following ideal graph
1217   //
1218   //   MemBarRelease
1219   //   MemBarCPUOrder
1220   //   CompareAndSwapX {CardMark}-optional
1221   //   MemBarCPUOrder
1222   //   MemBarAcquire
1223   //
1224   // So, where we can identify these volatile read and write
1225   // signatures we can choose to plant either of the above two code
1226   // sequences. For a volatile read we can simply plant a normal
1227   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1228   // also choose to inhibit translation of the MemBarAcquire and
1229   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1230   //
1231   // When we recognise a volatile store signature we can choose to
1232   // plant at a dmb ish as a translation for the MemBarRelease, a
1233   // normal str<x> and then a dmb ish for the MemBarVolatile.
1234   // Alternatively, we can inhibit translation of the MemBarRelease
1235   // and MemBarVolatile and instead plant a simple stlr<x>
1236   // instruction.
1237   //
1238   // when we recognise a CAS signature we can choose to plant a dmb
1239   // ish as a translation for the MemBarRelease, the conventional
1240   // macro-instruction sequence for the CompareAndSwap node (which
1241   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1242   // Alternatively, we can elide generation of the dmb instructions
1243   // and plant the alternative CompareAndSwap macro-instruction
1244   // sequence (which uses ldaxr<x>).
1245   //
1246   // Of course, the above only applies when we see these signature
1247   // configurations. We still want to plant dmb instructions in any
1248   // other cases where we may see a MemBarAcquire, MemBarRelease or
1249   // MemBarVolatile. For example, at the end of a constructor which
1250   // writes final/volatile fields we will see a MemBarRelease
1251   // instruction and this needs a 'dmb ish' lest we risk the
1252   // constructed object being visible without making the
1253   // final/volatile field writes visible.
1254   //
1255   // n.b. the translation rules below which rely on detection of the
1256   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1257   // If we see anything other than the signature configurations we
1258   // always just translate the loads and stores to ldr<x> and str<x>
1259   // and translate acquire, release and volatile membars to the
1260   // relevant dmb instructions.
1261   //
1262 
1263   // graph traversal helpers used for volatile put/get and CAS
1264   // optimization
1265 
1266   // 1) general purpose helpers
1267 
1268   // if node n is linked to a parent MemBarNode by an intervening
1269   // Control and Memory ProjNode return the MemBarNode otherwise return
1270   // NULL.
1271   //
1272   // n may only be a Load or a MemBar.
1273 
1274   MemBarNode *parent_membar(const Node *n)
1275   {
1276     Node *ctl = NULL;
1277     Node *mem = NULL;
1278     Node *membar = NULL;
1279 
1280     if (n->is_Load()) {
1281       ctl = n->lookup(LoadNode::Control);
1282       mem = n->lookup(LoadNode::Memory);
1283     } else if (n->is_MemBar()) {
1284       ctl = n->lookup(TypeFunc::Control);
1285       mem = n->lookup(TypeFunc::Memory);
1286     } else {
1287         return NULL;
1288     }
1289 
1290     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1291       return NULL;
1292     }
1293 
1294     membar = ctl->lookup(0);
1295 
1296     if (!membar || !membar->is_MemBar()) {
1297       return NULL;
1298     }
1299 
1300     if (mem->lookup(0) != membar) {
1301       return NULL;
1302     }
1303 
1304     return membar->as_MemBar();
1305   }
1306 
1307   // if n is linked to a child MemBarNode by intervening Control and
1308   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1309 
1310   MemBarNode *child_membar(const MemBarNode *n)
1311   {
1312     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1313     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1314 
1315     // MemBar needs to have both a Ctl and Mem projection
1316     if (! ctl || ! mem)
1317       return NULL;
1318 
1319     MemBarNode *child = NULL;
1320     Node *x;
1321 
1322     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1323       x = ctl->fast_out(i);
1324       // if we see a membar we keep hold of it. we may also see a new
1325       // arena copy of the original but it will appear later
1326       if (x->is_MemBar()) {
1327           child = x->as_MemBar();
1328           break;
1329       }
1330     }
1331 
1332     if (child == NULL) {
1333       return NULL;
1334     }
1335 
1336     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1337       x = mem->fast_out(i);
1338       // if we see a membar we keep hold of it. we may also see a new
1339       // arena copy of the original but it will appear later
1340       if (x == child) {
1341         return child;
1342       }
1343     }
1344     return NULL;
1345   }
1346 
1347   // helper predicate use to filter candidates for a leading memory
1348   // barrier
1349   //
1350   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1351   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1352 
1353   bool leading_membar(const MemBarNode *barrier)
1354   {
1355     int opcode = barrier->Opcode();
1356     // if this is a release membar we are ok
1357     if (opcode == Op_MemBarRelease) {
1358       return true;
1359     }
1360     // if its a cpuorder membar . . .
1361     if (opcode != Op_MemBarCPUOrder) {
1362       return false;
1363     }
1364     // then the parent has to be a release membar
1365     MemBarNode *parent = parent_membar(barrier);
1366     if (!parent) {
1367       return false;
1368     }
1369     opcode = parent->Opcode();
1370     return opcode == Op_MemBarRelease;
1371   }
1372 
1373   // 2) card mark detection helper
1374 
1375   // helper predicate which can be used to detect a volatile membar
1376   // introduced as part of a conditional card mark sequence either by
1377   // G1 or by CMS when UseCondCardMark is true.
1378   //
1379   // membar can be definitively determined to be part of a card mark
1380   // sequence if and only if all the following hold
1381   //
1382   // i) it is a MemBarVolatile
1383   //
1384   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1385   // true
1386   //
1387   // iii) the node's Mem projection feeds a StoreCM node.
1388 
1389   bool is_card_mark_membar(const MemBarNode *barrier)
1390   {
1391     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1392       return false;
1393     }
1394 
1395     if (barrier->Opcode() != Op_MemBarVolatile) {
1396       return false;
1397     }
1398 
1399     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1400 
1401     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1402       Node *y = mem->fast_out(i);
1403       if (y->Opcode() == Op_StoreCM) {
1404         return true;
1405       }
1406     }
1407 
1408     return false;
1409   }
1410 
1411 
1412   // 3) helper predicates to traverse volatile put or CAS graphs which
1413   // may contain GC barrier subgraphs
1414 
1415   // Preamble
1416   // --------
1417   //
1418   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1420   // leading MemBarRelease and a trailing MemBarVolatile as follows
1421   //
1422   //   MemBarRelease
1423   //  {    ||        } -- optional
1424   //  {MemBarCPUOrder}
1425   //       ||       \\
1426   //       ||     StoreX[mo_release]
1427   //       | \ Bot    / ???
1428   //       | MergeMem
1429   //       | /
1430   //   MemBarVolatile
1431   //
1432   // where
1433   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1434   //  | \ and / indicate further routing of the Ctl and Mem feeds
1435   //
1436   // Note that the memory feed from the CPUOrder membar to the
1437   // MergeMem node is an AliasIdxBot slice while the feed from the
1438   // StoreX is for a slice determined by the type of value being
1439   // written.
1440   //
1441   // the diagram above shows the graph we see for non-object stores.
1442   // for a volatile Object store (StoreN/P) we may see other nodes
1443   // below the leading membar because of the need for a GC pre- or
1444   // post-write barrier.
1445   //
  // with most GC configurations we will see this simple variant which
1447   // includes a post-write barrier card mark.
1448   //
1449   //   MemBarRelease______________________________
1450   //         ||    \\               Ctl \        \\
1451   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1452   //         | \ Bot  / oop                 . . .  /
1453   //         | MergeMem
1454   //         | /
1455   //         ||      /
1456   //   MemBarVolatile
1457   //
1458   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1459   // the object address to an int used to compute the card offset) and
1460   // Ctl+Mem to a StoreB node (which does the actual card mark).
1461   //
1462   // n.b. a StoreCM node is only ever used when CMS (with or without
1463   // CondCardMark) or G1 is configured. This abstract instruction
1464   // differs from a normal card mark write (StoreB) because it implies
1465   // a requirement to order visibility of the card mark (StoreCM)
1466   // after that of the object put (StoreP/N) using a StoreStore memory
1467   // barrier. Note that this is /not/ a requirement to order the
1468   // instructions in the generated code (that is already guaranteed by
1469   // the order of memory dependencies). Rather it is a requirement to
1470   // ensure visibility order which only applies on architectures like
1471   // AArch64 which do not implement TSO. This ordering is required for
1472   // both non-volatile and volatile puts.
1473   //
1474   // That implies that we need to translate a StoreCM using the
1475   // sequence
1476   //
1477   //   dmb ishst
1478   //   stlrb
1479   //
1480   // This dmb cannot be omitted even when the associated StoreX or
1481   // CompareAndSwapX is implemented using stlr. However, as described
1482   // below there are circumstances where a specific GC configuration
1483   // requires a stronger barrier in which case it can be omitted.
1484   // 
1485   // With the Serial or Parallel GC using +CondCardMark the card mark
1486   // is performed conditionally on it currently being unmarked in
1487   // which case the volatile put graph looks slightly different
1488   //
1489   //   MemBarRelease____________________________________________
1490   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1491   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1492   //         | \ Bot / oop                          \            |
1493   //         | MergeMem                            . . .      StoreB
1494   //         | /                                                /
1495   //         ||     /
1496   //   MemBarVolatile
1497   //
1498   // It is worth noting at this stage that all the above
1499   // configurations can be uniquely identified by checking that the
1500   // memory flow includes the following subgraph:
1501   //
1502   //   MemBarRelease
1503   //  {MemBarCPUOrder}
1504   //      |  \      . . .
1505   //      |  StoreX[mo_release]  . . .
1506   //  Bot |   / oop
1507   //     MergeMem
1508   //      |
1509   //   MemBarVolatile
1510   //
1511   // This is referred to as a *normal* volatile store subgraph. It can
1512   // easily be detected starting from any candidate MemBarRelease,
1513   // StoreX[mo_release] or MemBarVolatile node.
1514   //
1515   // A small variation on this normal case occurs for an unsafe CAS
1516   // operation. The basic memory flow subgraph for a non-object CAS is
1517   // as follows
1518   //
1519   //   MemBarRelease
1520   //         ||
1521   //   MemBarCPUOrder
1522   //          |     \\   . . .
1523   //          |     CompareAndSwapX
1524   //          |       |
1525   //      Bot |     SCMemProj
1526   //           \     / Bot
1527   //           MergeMem
1528   //           /
1529   //   MemBarCPUOrder
1530   //         ||
1531   //   MemBarAcquire
1532   //
1533   // The same basic variations on this arrangement (mutatis mutandis)
1534   // occur when a card mark is introduced. i.e. the CPUOrder MemBar
1535   // feeds the extra CastP2X, LoadB etc nodes but the above memory
1536   // flow subgraph is still present.
1537   // 
1538   // This is referred to as a *normal* CAS subgraph. It can easily be
1539   // detected starting from any candidate MemBarRelease,
1540   // StoreX[mo_release] or MemBarAcquire node.
1541   //
1542   // The code below uses two helper predicates, leading_to_trailing
1543   // and trailing_to_leading to identify these normal graphs, one
1544   // validating the layout starting from the top membar and searching
1545   // down and the other validating the layout starting from the lower
1546   // membar and searching up.
1547   //
1548   // There are two special case GC configurations when the simple
1549   // normal graphs above may not be generated: when using G1 (which
1550   // always employs a conditional card mark); and when using CMS with
1551   // conditional card marking (+CondCardMark) configured. These GCs
1552   // are both concurrent rather than stop-the world GCs. So they
1553   // introduce extra Ctl+Mem flow into the graph between the leading
1554   // and trailing membar nodes, in particular enforcing stronger
  // memory serialisation between the object put and the corresponding
1556   // conditional card mark. CMS employs a post-write GC barrier while
1557   // G1 employs both a pre- and post-write GC barrier.
1558   //
1559   // The post-write barrier subgraph for these configurations includes
1560   // a MemBarVolatile node -- referred to as a card mark membar --
1561   // which is needed to order the card write (StoreCM) operation in
1562   // the barrier, the preceding StoreX (or CompareAndSwapX) and Store
1563   // operations performed by GC threads i.e. a card mark membar
1564   // constitutes a StoreLoad barrier hence must be translated to a dmb
1565   // ish (whether or not it sits inside a volatile store sequence).
1566   //
1567   // Of course, the use of the dmb ish for the card mark membar also
  // implies that the StoreCM which follows can omit the dmb ishst
1569   // instruction. The necessary visibility ordering will already be
1570   // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
  // needs to be generated as part of the StoreCM sequence with GC
1572   // configuration +CMS -CondCardMark.
1573   // 
1574   // Of course all these extra barrier nodes may well be absent --
1575   // they are only inserted for object puts. Their potential presence
1576   // significantly complicates the task of identifying whether a
1577   // MemBarRelease, StoreX[mo_release], MemBarVolatile or
1578   // MemBarAcquire forms part of a volatile put or CAS when using
1579   // these GC configurations (see below) and also complicates the
1580   // decision as to how to translate a MemBarVolatile and StoreCM.
1581   //
  // So, this means that a card mark MemBarVolatile occurring in the
  // post-barrier graph needs to be distinguished from a normal
1584   // trailing MemBarVolatile. Resolving this is straightforward: a
1585   // card mark MemBarVolatile always projects a Mem feed to a StoreCM
1586   // node and that is a unique marker
1587   //
1588   //      MemBarVolatile (card mark)
1589   //       C |    \     . . .
1590   //         |   StoreCM   . . .
1591   //       . . .
1592   //
1593   // Returning to the task of translating the object put and the
1594   // leading/trailing membar nodes: what do the node graphs look like
1595   // for these 2 special cases? and how can we determine the status of
1596   // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
1597   // normal and non-normal cases?
1598   //
1599   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1601   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1602   // intervening StoreLoad barrier (MemBarVolatile).
1603   //
1604   // So, with CMS we may see a node graph for a volatile object store
1605   // which looks like this
1606   //
1607   //   MemBarRelease
1608   //   MemBarCPUOrder_(leading)____________________
1609   //     C |  | M \       \\               M |   C \
1610   //       |  |    \    StoreN/P[mo_release] |  CastP2X
1611   //       |  | Bot \    / oop      \        |
1612   //       |  |    MergeMem          \      / 
1613   //       |  |      /                |    /
1614   //     MemBarVolatile (card mark)   |   /
1615   //     C |  ||    M |               |  /
1616   //       | LoadB    | Bot       oop | / Bot
1617   //       |   |      |              / /
1618   //       | Cmp      |\            / /
1619   //       | /        | \          / /
1620   //       If         |  \        / /
1621   //       | \        |   \      / /
1622   // IfFalse  IfTrue  |    \    / /
1623   //       \     / \  |    |   / /
1624   //        \   / StoreCM  |  / /
1625   //         \ /      \   /  / /
1626   //        Region     Phi  / /
1627   //          | \   Raw |  / /
1628   //          |  . . .  | / /
1629   //          |       MergeMem
1630   //          |           |
1631   //        MemBarVolatile (trailing)
1632   //
1633   // Notice that there are two MergeMem nodes below the leading
1634   // membar. The first MergeMem merges the AliasIdxBot Mem slice from
1635   // the leading membar and the oopptr Mem slice from the Store into
1636   // the card mark membar. The trailing MergeMem merges the
1637   // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
1638   // slice from the StoreCM and an oop slice from the StoreN/P node
1639   // into the trailing membar (n.b. the raw slice proceeds via a Phi
1640   // associated with the If region).
1641   //
1642   // So, in the case of CMS + CondCardMark the volatile object store
1643   // graph still includes a normal volatile store subgraph from the
1644   // leading membar to the trailing membar. However, it also contains
1645   // the same shape memory flow to the card mark membar. The two flows
1646   // can be distinguished by testing whether or not the downstream
1647   // membar is a card mark membar.
1648   //
1649   // The graph for a CAS also varies with CMS + CondCardMark, in
1650   // particular employing a control feed from the CompareAndSwapX node
1651   // through a CmpI and If to the card mark membar and StoreCM which
1652   // updates the associated card. This avoids executing the card mark
1653   // if the CAS fails. However, it can be seen from the diagram below
1654   // that the presence of the barrier does not alter the normal CAS
1655   // memory subgraph where the leading membar feeds a CompareAndSwapX,
1656   // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
1657   // MemBarAcquire pair.
1658   //
1659   //   MemBarRelease
1660   //   MemBarCPUOrder__(leading)_______________________
1661   //   C /  M |                        \\            C \
1662   //  . . .   | Bot                CompareAndSwapN/P   CastP2X
1663   //          |                  C /  M |
1664   //          |                 CmpI    |
1665   //          |                  /      |
1666   //          |               . . .     |
1667   //          |              IfTrue     |
1668   //          |              /          |
1669   //       MemBarVolatile (card mark)   |
1670   //        C |  ||    M |              |
1671   //          | LoadB    | Bot   ______/|
1672   //          |   |      |      /       |
1673   //          | Cmp      |     /      SCMemProj
1674   //          | /        |    /         |
1675   //          If         |   /         /
1676   //          | \        |  /         / Bot
1677   //     IfFalse  IfTrue | /         /
1678   //          |   / \   / / prec    /
1679   //   . . .  |  /  StoreCM        /
1680   //        \ | /      | raw      /
1681   //        Region    . . .      /
1682   //           | \              /
1683   //           |   . . .   \    / Bot
1684   //           |        MergeMem
1685   //           |          /
1686   //         MemBarCPUOrder
1687   //         MemBarAcquire (trailing)
1688   //
1689   // This has a slightly different memory subgraph to the one seen
1690   // previously but the core of it has a similar memory flow to the
1691   // CAS normal subgraph:
1692   //
1693   //   MemBarRelease
1694   //   MemBarCPUOrder____
1695   //         |          \      . . .
1696   //         |       CompareAndSwapX  . . .
1697   //         |       C /  M |
1698   //         |      CmpI    |
1699   //         |       /      |
1700   //         |      . .    /
1701   //     Bot |   IfTrue   /
1702   //         |   /       /
1703   //    MemBarVolatile  /
1704   //         | ...     /
1705   //      StoreCM ... /
1706   //         |       / 
1707   //       . . .  SCMemProj
1708   //      Raw \    / Bot
1709   //        MergeMem
1710   //           |
1711   //   MemBarCPUOrder
1712   //   MemBarAcquire
1713   //
1714   // The G1 graph for a volatile object put is a lot more complicated.
1715   // Nodes inserted on behalf of G1 may comprise: a pre-write graph
1716   // which adds the old value to the SATB queue; the releasing store
1717   // itself; and, finally, a post-write graph which performs a card
1718   // mark.
1719   //
1720   // The pre-write graph may be omitted, but only when the put is
1721   // writing to a newly allocated (young gen) object and then only if
1722   // there is a direct memory chain to the Initialize node for the
1723   // object allocation. This will not happen for a volatile put since
1724   // any memory chain passes through the leading membar.
1725   //
1726   // The pre-write graph includes a series of 3 If tests. The outermost
1727   // If tests whether SATB is enabled (no else case). The next If tests
1728   // whether the old value is non-NULL (no else case). The third tests
1729   // whether the SATB queue index is > 0, if so updating the queue. The
1730   // else case for this third If calls out to the runtime to allocate a
1731   // new queue buffer.
1732   //
1733   // So with G1 the pre-write and releasing store subgraph looks like
1734   // this (the nested Ifs are omitted).
1735   //
1736   //  MemBarRelease (leading)____________
1737   //     C |  ||  M \   M \    M \  M \ . . .
1738   //       | LoadB   \  LoadL  LoadN   \
1739   //       | /        \                 \
1740   //       If         |\                 \
1741   //       | \        | \                 \
1742   //  IfFalse  IfTrue |  \                 \
1743   //       |     |    |   \                 |
1744   //       |     If   |   /\                |
1745   //       |     |          \               |
1746   //       |                 \              |
1747   //       |    . . .         \             |
1748   //       | /       | /       |            |
1749   //      Region  Phi[M]       |            |
1750   //       | \       |         |            |
1751   //       |  \_____ | ___     |            |
1752   //     C | C \     |   C \ M |            |
1753   //       | CastP2X | StoreN/P[mo_release] |
1754   //       |         |         |            |
1755   //     C |       M |       M |          M |
1756   //        \        | Raw     | oop       / Bot
1757   //                  . . .
1758   //          (post write subtree elided)
1759   //                    . . .
1760   //             C \         M /
1761   //         MemBarVolatile (trailing)
1762   //
1763   // Note that the three memory feeds into the post-write tree are an
1764   // AliasRawIdx slice associated with the writes in the pre-write
1765   // tree, an oop type slice from the StoreX specific to the type of
1766   // the volatile field and the AliasBotIdx slice emanating from the
1767   // leading membar.
1768   //
1769   // n.b. the LoadB in this subgraph is not the card read -- it's a
1770   // read of the SATB queue active flag.
1771   //
1772   // The CAS graph is once again a variant of the above with a
1773   // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
1774   // value from the CompareAndSwapX node is fed into the post-write
  // graph along with the AliasIdxRaw feed from the pre-barrier and
  // the AliasIdxBot feeds from the leading membar and the SCMemProj.
1777   //
1778   //  MemBarRelease (leading)____________
1779   //     C |  ||  M \   M \    M \  M \ . . .
1780   //       | LoadB   \  LoadL  LoadN   \
1781   //       | /        \                 \
1782   //       If         |\                 \
1783   //       | \        | \                 \
1784   //  IfFalse  IfTrue |  \                 \
1785   //       |     |    |   \                 \
1786   //       |     If   |    \                 |
1787   //       |     |          \                |
1788   //       |                 \               |
1789   //       |    . . .         \              |
1790   //       | /       | /       \             |
1791   //      Region  Phi[M]        \            |
1792   //       | \       |           \           |
1793   //       |  \_____ |            |          |
1794   //     C | C \     |            |          |
1795   //       | CastP2X |     CompareAndSwapX   |
1796   //       |         |   res |     |         |
1797   //     C |       M |       |  SCMemProj  M |
1798   //        \        | Raw   |     | Bot    / Bot
1799   //                  . . .
1800   //          (post write subtree elided)
1801   //                    . . .
1802   //             C \         M /
1803   //         MemBarVolatile (trailing)
1804   //
1805   // The G1 post-write subtree is also optional, this time when the
1806   // new value being written is either null or can be identified as a
1807   // newly allocated (young gen) object with no intervening control
1808   // flow. The latter cannot happen but the former may, in which case
1809   // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
1811   // trailing membar as per the normal subgraph. So, the only special
1812   // case which arises is when the post-write subgraph is generated.
1813   //
1814   // The kernel of the post-write G1 subgraph is the card mark itself
1815   // which includes a card mark memory barrier (MemBarVolatile), a
1816   // card test (LoadB), and a conditional update (If feeding a
1817   // StoreCM). These nodes are surrounded by a series of nested Ifs
1818   // which try to avoid doing the card mark. The top level If skips if
1819   // the object reference does not cross regions (i.e. it tests if
1820   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1821   // need not be recorded. The next If, which skips on a NULL value,
1822   // may be absent (it is not generated if the type of value is >=
1823   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1824   // checking if card_val != young).  n.b. although this test requires
1825   // a pre-read of the card it can safely be done before the StoreLoad
1826   // barrier. However that does not bypass the need to reread the card
1827   // after the barrier.
1828   //
1829   //                (pre-write subtree elided)
1830   //        . . .                  . . .    . . .  . . .
1831   //        C |               M |    M |    M |
1832   //       Region            Phi[M] StoreN    |
1833   //          |            Raw  |  oop |  Bot |
1834   //         / \_______         |\     |\     |\
1835   //      C / C \      . . .    | \    | \    | \
1836   //       If   CastP2X . . .   |  \   |  \   |  \
1837   //       / \                  |   \  |   \  |   \
1838   //      /   \                 |    \ |    \ |    \
1839   // IfFalse IfTrue             |      |      |     \
1840   //   |       |                 \     |     /       |
1841   //   |       If                 \    | \  /   \    |
1842   //   |      / \                  \   |   /     \   |
1843   //   |     /   \                  \  |  / \     |  |
1844   //   | IfFalse IfTrue           MergeMem   \    |  |
1845   //   |  . . .    / \                 |      \   |  |
1846   //   |          /   \                |       |  |  |
1847   //   |     IfFalse IfTrue            |       |  |  |
1848   //   |      . . .    |               |       |  |  |
1849   //   |               If             /        |  |  |
1850   //   |               / \           /         |  |  |
1851   //   |              /   \         /          |  |  |
1852   //   |         IfFalse IfTrue    /           |  |  |
1853   //   |           . . .   |      /            |  |  |
1854   //   |                    \    /             |  |  |
1855   //   |                     \  /              |  |  |
1856   //   |         MemBarVolatile__(card mark  ) |  |  |
1857   //   |              ||   C |     \           |  |  |
1858   //   |             LoadB   If     |         /   |  |
1859   //   |                    / \ Raw |        /   /  /
1860   //   |                   . . .    |       /   /  /
1861   //   |                        \   |      /   /  /
1862   //   |                        StoreCM   /   /  /
1863   //   |                           |     /   /  /
1864   //   |                            . . .   /  /
1865   //   |                                   /  /
1866   //   |   . . .                          /  /
1867   //   |    |             | /            /  /
1868   //   |    |           Phi[M] /        /  /
1869   //   |    |             |   /        /  /
1870   //   |    |             |  /        /  /
1871   //   |  Region  . . .  Phi[M]      /  /
1872   //   |    |             |         /  /
1873   //    \   |             |        /  /
1874   //     \  | . . .       |       /  /
1875   //      \ |             |      /  /
1876   //      Region         Phi[M] /  /
1877   //        |               \  /  /
1878   //         \             MergeMem
1879   //          \            /
1880   //          MemBarVolatile
1881   //
1882   // As with CMS + CondCardMark the first MergeMem merges the
1883   // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
1884   // slice from the Store into the card mark membar. However, in this
1885   // case it may also merge an AliasRawIdx mem slice from the pre
1886   // barrier write.
1887   //
1888   // The trailing MergeMem merges an AliasIdxBot Mem slice from the
1889   // leading membar with an oop slice from the StoreN and an
1890   // AliasRawIdx slice from the post barrier writes. In this case the
1891   // AliasIdxRaw Mem slice is merged through a series of Phi nodes
1892   // which combine feeds from the If regions in the post barrier
1893   // subgraph.
1894   //
1895   // So, for G1 the same characteristic subgraph arises as for CMS +
1896   // CondCardMark. There is a normal subgraph feeding the card mark
1897   // membar and a normal subgraph feeding the trailing membar.
1898   //
1899   // The CAS graph when using G1GC also includes an optional
1900   // post-write subgraph. It is very similar to the above graph except
1901   // for a few details.
1902   // 
  // - The control flow is gated by an additional If which tests the
1904   // result from the CompareAndSwapX node
1905   // 
1906   //  - The MergeMem which feeds the card mark membar only merges the
1907   // AliasIdxBot slice from the leading membar and the AliasIdxRaw
1908   // slice from the pre-barrier. It does not merge the SCMemProj
1909   // AliasIdxBot slice. So, this subgraph does not look like the
1910   // normal CAS subgraph.
1911   //
1912   // - The MergeMem which feeds the trailing membar merges the
1913   // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
1914   // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
1915   // has two AliasIdxBot input slices. However, this subgraph does
1916   // still look like the normal CAS subgraph.
1917   //
1918   // So, the upshot is:
1919   //
1920   // In all cases a volatile put graph will include a *normal*
  // volatile store subgraph between the leading membar and the
  // trailing membar. It may also include a normal volatile store
  // subgraph between the leading membar and the card mark membar.
1924   //
1925   // In all cases a CAS graph will contain a unique normal CAS graph
1926   // feeding the trailing membar.
1927   //
1928   // In all cases where there is a card mark membar (either as part of
1929   // a volatile object put or CAS) it will be fed by a MergeMem whose
1930   // AliasIdxBot slice feed will be a leading membar.
1931   //
1932   // The predicates controlling generation of instructions for store
1933   // and barrier nodes employ a few simple helper functions (described
1934   // below) which identify the presence or absence of all these
1935   // subgraph configurations and provide a means of traversing from
1936   // one node in the subgraph to another.
1937 
1938   // is_CAS(int opcode)
1939   //
1940   // return true if opcode is one of the possible CompareAndSwapX
1941   // values otherwise false.
1942 
1943   bool is_CAS(int opcode)
1944   {
1945     return (opcode == Op_CompareAndSwapI ||
1946             opcode == Op_CompareAndSwapL ||
1947             opcode == Op_CompareAndSwapN ||
1948             opcode == Op_CompareAndSwapP);
1949   }
1950 
1951   // leading_to_trailing
1952   //
  // graph traversal helper which detects the normal case Mem feed from
1954   // a release membar (or, optionally, its cpuorder child) to a
1955   // dependent volatile membar i.e. it ensures that one or other of
1956   // the following Mem flow subgraph is present.
1957   //
1958   //   MemBarRelease {leading}
1959   //   {MemBarCPUOrder} {optional}
1960   //     Bot |  \      . . .
1961   //         |  StoreN/P[mo_release]  . . .
1962   //         |   /
1963   //        MergeMem
1964   //         |
1965   //   MemBarVolatile {not card mark}
1966   //
1967   //   MemBarRelease {leading}
1968   //   {MemBarCPUOrder} {optional}
1969   //      |       \      . . .
1970   //      |     CompareAndSwapX  . . .
1971   //               |
1972   //     . . .    SCMemProj
1973   //           \   |
1974   //      |    MergeMem
1975   //      |       /
1976   //    MemBarCPUOrder
1977   //    MemBarAcquire {trailing}
1978   //
1979   // the predicate needs to be capable of distinguishing the following
1980   // volatile put graph which may arises when a GC post barrier
1981   // inserts a card mark membar
1982   //
1983   //   MemBarRelease {leading}
1984   //   {MemBarCPUOrder}__
1985   //     Bot |   \       \
1986   //         |   StoreN/P \
1987   //         |    / \     |
1988   //        MergeMem \    |
1989   //         |        \   |
1990   //   MemBarVolatile  \  |
1991   //    {card mark}     \ |
1992   //                  MergeMem
1993   //                      |
1994   // {not card mark} MemBarVolatile
1995   //
1996   // if the correct configuration is present returns the trailing
1997   // membar otherwise NULL.
1998   //
1999   // the input membar is expected to be either a cpuorder membar or a
2000   // release membar. in the latter case it should not have a cpu membar
2001   // child.
2002   //
2003   // the returned value may be a card mark or trailing membar
2004   //
2005 
2006   MemBarNode *leading_to_trailing(MemBarNode *leading)
2007   {
2008     assert((leading->Opcode() == Op_MemBarRelease ||
2009             leading->Opcode() == Op_MemBarCPUOrder),
2010            "expecting a volatile or cpuroder membar!");
2011 
2012     // check the mem flow
2013     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2014 
2015     if (!mem) {
2016       return NULL;
2017     }
2018 
2019     Node *x = NULL;
2020     StoreNode * st = NULL;
2021     LoadStoreNode *cas = NULL;
2022     MergeMemNode *mm = NULL;
2023     MergeMemNode *mm2 = NULL;
2024 
2025     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2026       x = mem->fast_out(i);
2027       if (x->is_MergeMem()) {
2028         if (mm != NULL) {
2029           if (mm2 != NULL) {
2030           // should not see more than 2 merge mems
2031             return NULL;
2032           } else {
2033             mm2 = x->as_MergeMem();
2034           }
2035         } else {
2036           mm = x->as_MergeMem();
2037         }
2038       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2039         // two releasing stores/CAS nodes is one too many
2040         if (st != NULL || cas != NULL) {
2041           return NULL;
2042         }
2043         st = x->as_Store();
2044       } else if (is_CAS(x->Opcode())) {
2045         if (st != NULL || cas != NULL) {
2046           return NULL;
2047         }
2048         cas = x->as_LoadStore();
2049       }
2050     }
2051 
2052     // must have a store or a cas
2053     if (!st && !cas) {
2054       return NULL;
2055     }
2056 
2057     // must have at least one merge if we also have st
2058     if (st && !mm) {
2059       return NULL;
2060     }
2061 
2062     if (cas) {
2063       Node *y = NULL;
2064       // look for an SCMemProj
2065       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
2066         x = cas->fast_out(i);
2067         if (x->is_Proj()) {
2068           y = x;
2069           break;
2070         }
2071       }
2072       if (y == NULL) {
2073         return NULL;
2074       }
2075       // the proj must feed a MergeMem
2076       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
2077         x = y->fast_out(i);
2078         if (x->is_MergeMem()) {
2079           mm = x->as_MergeMem();
2080           break;
2081         }
2082       }
2083       if (mm == NULL) {
2084         return NULL;
2085       }
2086       MemBarNode *mbar = NULL;
2087       // ensure the merge feeds a trailing membar cpuorder + acquire pair
2088       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2089         x = mm->fast_out(i);
2090         if (x->is_MemBar()) {
2091           int opcode = x->Opcode();
2092           if (opcode == Op_MemBarCPUOrder) {
2093             MemBarNode *z =  x->as_MemBar();
2094             z = child_membar(z);
2095             if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
2096               mbar = z;
2097             }
2098           }
2099           break;
2100         }
2101       }
2102       return mbar;
2103     } else {
2104       Node *y = NULL;
2105       // ensure the store feeds the first mergemem;
2106       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2107         if (st->fast_out(i) == mm) {
2108           y = st;
2109           break;
2110         }
2111       }
2112       if (y == NULL) {
2113         return NULL;
2114       }
2115       if (mm2 != NULL) {
2116         // ensure the store feeds the second mergemem;
2117         y = NULL;
2118         for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2119           if (st->fast_out(i) == mm2) {
2120             y = st;
2121           }
2122         }
2123         if (y == NULL) {
2124           return NULL;
2125         }
2126       }
2127 
2128       MemBarNode *mbar = NULL;
2129       // ensure the first mergemem feeds a volatile membar
2130       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2131         x = mm->fast_out(i);
2132         if (x->is_MemBar()) {
2133           int opcode = x->Opcode();
2134           if (opcode == Op_MemBarVolatile) {
2135             mbar = x->as_MemBar();
2136           }
2137           break;
2138         }
2139       }
2140       if (mm2 == NULL) {
2141         // this is our only option for a trailing membar
2142         return mbar;
2143       }
2144       // ensure the second mergemem feeds a volatile membar
2145       MemBarNode *mbar2 = NULL;
2146       for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
2147         x = mm2->fast_out(i);
2148         if (x->is_MemBar()) {
2149           int opcode = x->Opcode();
2150           if (opcode == Op_MemBarVolatile) {
2151             mbar2 = x->as_MemBar();
2152           }
2153           break;
2154         }
2155       }
2156       // if we have two merge mems we must have two volatile membars
2157       if (mbar == NULL || mbar2 == NULL) {
2158         return NULL;
2159       }
2160       // return the trailing membar
2161       if (is_card_mark_membar(mbar2)) {
2162         return mbar;
2163       } else {
2164         if (is_card_mark_membar(mbar)) {
2165           return mbar2;
2166         } else {
2167           return NULL;
2168         }
2169       }
2170     }
2171   }
2172 
2173   // trailing_to_leading
2174   //
2175   // graph traversal helper which detects the normal case Mem feed
2176   // from a trailing membar to a preceding release membar (optionally
2177   // its cpuorder child) i.e. it ensures that one or other of the
2178   // following Mem flow subgraphs is present.
2179   //
2180   //   MemBarRelease {leading}
2181   //   MemBarCPUOrder {optional}
2182   //    | Bot |  \      . . .
2183   //    |     |  StoreN/P[mo_release]  . . .
2184   //    |     |   /
2185   //    |    MergeMem
2186   //    |     |
2187   //   MemBarVolatile {not card mark}
2188   //
2189   //   MemBarRelease {leading}
2190   //   MemBarCPUOrder {optional}
2191   //      |       \      . . .
2192   //      |     CompareAndSwapX  . . .
2193   //               |
2194   //     . . .    SCMemProj
2195   //           \   |
2196   //      |    MergeMem
2197   //      |       |
2198   //    MemBarCPUOrder
2199   //    MemBarAcquire {trailing}
2200   //
2201   // this predicate checks for the same flow as the previous predicate
2202   // but starting from the bottom rather than the top.
2203   //
  // if the configuration is present returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
2206   //
2207   // n.b. the input membar is expected to be a MemBarVolatile or
2208   // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
2209   // mark membar.
2210 
2211   MemBarNode *trailing_to_leading(const MemBarNode *barrier)
2212   {
2213     // input must be a volatile membar
2214     assert((barrier->Opcode() == Op_MemBarVolatile ||
2215             barrier->Opcode() == Op_MemBarAcquire),
2216            "expecting a volatile or an acquire membar");
2217 
2218     assert((barrier->Opcode() != Op_MemBarVolatile) ||
2219            !is_card_mark_membar(barrier),
2220            "not expecting a card mark membar");
2221     Node *x;
2222     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2223 
2224     // if we have an acquire membar then it must be fed via a CPUOrder
2225     // membar
2226 
2227     if (is_cas) {
2228       // skip to parent barrier which must be a cpuorder
2229       x = parent_membar(barrier);
2230       if (x->Opcode() != Op_MemBarCPUOrder)
2231         return NULL;
2232     } else {
2233       // start from the supplied barrier
2234       x = (Node *)barrier;
2235     }
2236 
2237     // the Mem feed to the membar should be a merge
2238     x = x ->in(TypeFunc::Memory);
2239     if (!x->is_MergeMem())
2240       return NULL;
2241 
2242     MergeMemNode *mm = x->as_MergeMem();
2243 
2244     if (is_cas) {
2245       // the merge should be fed from the CAS via an SCMemProj node
2246       x = NULL;
2247       for (uint idx = 1; idx < mm->req(); idx++) {
2248         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2249           x = mm->in(idx);
2250           break;
2251         }
2252       }
2253       if (x == NULL) {
2254         return NULL;
2255       }
2256       // check for a CAS feeding this proj
2257       x = x->in(0);
2258       int opcode = x->Opcode();
2259       if (!is_CAS(opcode)) {
2260         return NULL;
2261       }
2262       // the CAS should get its mem feed from the leading membar
2263       x = x->in(MemNode::Memory);
2264     } else {
2265       // the merge should get its Bottom mem feed from the leading membar
2266       x = mm->in(Compile::AliasIdxBot);
2267     }
2268 
2269     // ensure this is a non control projection
2270     if (!x->is_Proj() || x->is_CFG()) {
2271       return NULL;
2272     }
2273     // if it is fed by a membar that's the one we want
2274     x = x->in(0);
2275 
2276     if (!x->is_MemBar()) {
2277       return NULL;
2278     }
2279 
2280     MemBarNode *leading = x->as_MemBar();
2281     // reject invalid candidates
2282     if (!leading_membar(leading)) {
2283       return NULL;
2284     }
2285 
2286     // ok, we have a leading membar, now for the sanity clauses
2287 
2288     // the leading membar must feed Mem to a releasing store or CAS
2289     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2290     StoreNode *st = NULL;
2291     LoadStoreNode *cas = NULL;
2292     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2293       x = mem->fast_out(i);
2294       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2295         // two stores or CASes is one too many
2296         if (st != NULL || cas != NULL) {
2297           return NULL;
2298         }
2299         st = x->as_Store();
2300       } else if (is_CAS(x->Opcode())) {
2301         if (st != NULL || cas != NULL) {
2302           return NULL;
2303         }
2304         cas = x->as_LoadStore();
2305       }
2306     }
2307 
2308     // we should not have both a store and a cas
2309     if (st == NULL & cas == NULL) {
2310       return NULL;
2311     }
2312 
2313     if (st == NULL) {
2314       // nothing more to check
2315       return leading;
2316     } else {
2317       // we should not have a store if we started from an acquire
2318       if (is_cas) {
2319         return NULL;
2320       }
2321 
2322       // the store should feed the merge we used to get here
2323       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2324         if (st->fast_out(i) == mm) {
2325           return leading;
2326         }
2327       }
2328     }
2329 
2330     return NULL;
2331   }
2332 
2333   // card_mark_to_leading
2334   //
2335   // graph traversal helper which traverses from a card mark volatile
2336   // membar to a leading membar i.e. it ensures that the following Mem
2337   // flow subgraph is present.
2338   //
2339   //    MemBarRelease {leading}
2340   //   {MemBarCPUOrder} {optional}
2341   //         |   . . .
2342   //     Bot |   /
2343   //      MergeMem
2344   //         |
2345   //     MemBarVolatile (card mark)
2346   //        |     \
2347   //      . . .   StoreCM
2348   //
  // if the configuration is present returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
2351   //
  // n.b. the input membar is expected to be a MemBarVolatile and must
2353   // be a card mark membar.
2354 
2355   MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
2356   {
2357     // input must be a card mark volatile membar
2358     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2359 
2360     // the Mem feed to the membar should be a merge
2361     Node *x = barrier->in(TypeFunc::Memory);
2362     if (!x->is_MergeMem()) {
2363       return NULL;
2364     }
2365 
2366     MergeMemNode *mm = x->as_MergeMem();
2367 
2368     x = mm->in(Compile::AliasIdxBot);
2369 
2370     if (!x->is_MemBar()) {
2371       return NULL;
2372     }
2373 
2374     MemBarNode *leading = x->as_MemBar();
2375 
2376     if (leading_membar(leading)) {
2377       return leading;
2378     }
2379 
2380     return NULL;
2381   }
2382 
// unnecessary_acquire
//
// predicate controlling translation of an acquire membar. returns
// true when the barrier can be elided because the associated load
// will be (or has been) matched as an acquiring load (ldar), or
// because the barrier is the trailing membar of a CAS sequence.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on it's preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    // the barrier is only redundant if the load really is an
    // acquiring load
    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    // the same load must also hang off the parent's Mem projection
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2490 
2491 bool needs_acquiring_load(const Node *n)
2492 {
2493   assert(n->is_Load(), "expecting a load");
2494   if (UseBarriersForVolatile) {
2495     // we use a normal load and a dmb
2496     return false;
2497   }
2498 
2499   LoadNode *ld = n->as_Load();
2500 
2501   if (!ld->is_acquire()) {
2502     return false;
2503   }
2504 
2505   // check if this load is feeding an acquire membar
2506   //
2507   //   LoadX[mo_acquire]
2508   //   {  |1   }
2509   //   {DecodeN}
2510   //      |Parms
2511   //   MemBarAcquire*
2512   //
2513   // where * tags node we were passed
2514   // and |k means input k
2515 
2516   Node *start = ld;
2517   Node *mbacq = NULL;
2518 
2519   // if we hit a DecodeNarrowPtr we reset the start node and restart
2520   // the search through the outputs
2521  restart:
2522 
2523   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2524     Node *x = start->fast_out(i);
2525     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2526       mbacq = x;
2527     } else if (!mbacq &&
2528                (x->is_DecodeNarrowPtr() ||
2529                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2530       start = x;
2531       goto restart;
2532     }
2533   }
2534 
2535   if (mbacq) {
2536     return true;
2537   }
2538 
2539   // now check for an unsafe volatile get
2540 
2541   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2542   //
2543   //     MemBarCPUOrder
2544   //        ||       \\
2545   //   MemBarAcquire* LoadX[mo_acquire]
2546   //        ||
2547   //   MemBarCPUOrder
2548 
2549   MemBarNode *membar;
2550 
2551   membar = parent_membar(ld);
2552 
2553   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2554     return false;
2555   }
2556 
2557   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2558 
2559   membar = child_membar(membar);
2560 
2561   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2562     return false;
2563   }
2564 
2565   membar = child_membar(membar);
2566 
2567   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2568     return false;
2569   }
2570 
2571   return true;
2572 }
2573 
2574 bool unnecessary_release(const Node *n)
2575 {
2576   assert((n->is_MemBar() &&
2577           n->Opcode() == Op_MemBarRelease),
2578          "expecting a release membar");
2579 
2580   if (UseBarriersForVolatile) {
2581     // we need to plant a dmb
2582     return false;
2583   }
2584 
2585   // if there is a dependent CPUOrder barrier then use that as the
2586   // leading
2587 
2588   MemBarNode *barrier = n->as_MemBar();
2589   // check for an intervening cpuorder membar
2590   MemBarNode *b = child_membar(barrier);
2591   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2592     // ok, so start the check from the dependent cpuorder barrier
2593     barrier = b;
2594   }
2595 
2596   // must start with a normal feed
2597   MemBarNode *trailing = leading_to_trailing(barrier);
2598 
2599   return (trailing != NULL);
2600 }
2601 
2602 bool unnecessary_volatile(const Node *n)
2603 {
2604   // assert n->is_MemBar();
2605   if (UseBarriersForVolatile) {
2606     // we need to plant a dmb
2607     return false;
2608   }
2609 
2610   MemBarNode *mbvol = n->as_MemBar();
2611 
2612   // first we check if this is part of a card mark. if so then we have
2613   // to generate a StoreLoad barrier
2614 
2615   if (is_card_mark_membar(mbvol)) {
2616       return false;
2617   }
2618 
2619   // ok, if it's not a card mark then we still need to check if it is
2620   // a trailing membar of a volatile put graph.
2621 
2622   return (trailing_to_leading(mbvol) != NULL);
2623 }
2624 
2625 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2626 
2627 bool needs_releasing_store(const Node *n)
2628 {
2629   // assert n->is_Store();
2630   if (UseBarriersForVolatile) {
2631     // we use a normal store and dmb combination
2632     return false;
2633   }
2634 
2635   StoreNode *st = n->as_Store();
2636 
2637   // the store must be marked as releasing
2638   if (!st->is_release()) {
2639     return false;
2640   }
2641 
2642   // the store must be fed by a membar
2643 
2644   Node *x = st->lookup(StoreNode::Memory);
2645 
2646   if (! x || !x->is_Proj()) {
2647     return false;
2648   }
2649 
2650   ProjNode *proj = x->as_Proj();
2651 
2652   x = proj->lookup(0);
2653 
2654   if (!x || !x->is_MemBar()) {
2655     return false;
2656   }
2657 
2658   MemBarNode *barrier = x->as_MemBar();
2659 
2660   // if the barrier is a release membar or a cpuorder mmebar fed by a
2661   // release membar then we need to check whether that forms part of a
2662   // volatile put graph.
2663 
2664   // reject invalid candidates
2665   if (!leading_membar(barrier)) {
2666     return false;
2667   }
2668 
2669   // does this lead a normal subgraph?
2670   MemBarNode *trailing = leading_to_trailing(barrier);
2671 
2672   return (trailing != NULL);
2673 }
2674 
2675 // predicate controlling translation of CAS
2676 //
2677 // returns true if CAS needs to use an acquiring load otherwise false
2678 
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit dmbs will be planted around the CAS instead
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  // debug-only verification that the CAS sits inside the expected
  // graph shape: MemBarRelease -> MemBarCPUOrder -> CAS ... -> trailing
  // MemBarAcquire
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_trailing(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2723 
2724 // predicate controlling translation of StoreCM
2725 //
2726 // returns true if a StoreStore must precede the card write otherwise
2727 // false
2728 
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking. Any other occurrence will happen when
  // performing a card mark using CMS with conditional card marking or
  // G1. In those cases the preceding MemBarVolatile will be
  // translated to a dmb ish which guarantees visibility of the
  // preceding StoreN/P before this StoreCM

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then we must
  // insert the dmb ishst
  //
  // NOTE(review): from here on we must be using CMS *without*
  // conditional card marking (the opposite case returned above), and
  // both remaining paths return false, so this check is redundant —
  // kept for documentation value only

  if (UseBarriersForVolatile) {
    return false;
  }

  // we must be using CMS without conditional card marking so we have
  // to generate the StoreStore

  return false;
}
2757 
2758 
2759 #define __ _masm.
2760 
2761 // advance declarations for helper functions to convert register
2762 // indices to register objects
2763 
2764 // the ad file has to provide implementations of certain methods
2765 // expected by the generic code
2766 //
2767 // REQUIRED FUNCTIONALITY
2768 
2769 //=============================================================================
2770 
2771 // !!!!! Special hack to get all types of calls to specify the byte offset
2772 //       from the start of the call to the point where the return address
2773 //       will point.
2774 
2775 int MachCallStaticJavaNode::ret_addr_offset()
2776 {
2777   // call should be a simple bl
2778   int off = 4;
2779   return off;
2780 }
2781 
2782 int MachCallDynamicJavaNode::ret_addr_offset()
2783 {
2784   return 16; // movz, movk, movk, bl
2785 }
2786 
2787 int MachCallRuntimeNode::ret_addr_offset() {
2788   // for generated stubs the call will be
2789   //   far_call(addr)
2790   // for real runtime callouts it will be six instructions
2791   // see aarch64_enc_java_to_runtime
2792   //   adr(rscratch2, retaddr)
2793   //   lea(rscratch1, RuntimeAddress(addr)
2794   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2795   //   blrt rscratch1
2796   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2797   if (cb) {
2798     return MacroAssembler::far_branch_size();
2799   } else {
2800     return 6 * NativeInstruction::instruction_size;
2801   }
2802 }
2803 
// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
2817 
2818 //=============================================================================
2819 
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// a breakpoint node is translated to a single brk instruction
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // let the shared code derive the size from the emitted instructions
  return MachNode::size(ra_);
}
2834 
2835 //=============================================================================
2836 
#ifndef PRODUCT
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // each nop occupies one machine instruction
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2853 
2854 //=============================================================================
// the constant table is addressed absolutely on AArch64, so the
// constant base node produces no value and emits no code
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never reached: requires_postalloc_expand() returns false above
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // emits no instructions (see emit above)
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
2879 
#ifndef PRODUCT
// debug listing of the prolog; mirrors the frame-build sequence
// produced by MachPrologNode::emit below
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames are built with an immediate sub/stp pair; frames too
  // large for the immediate form route the offset through rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
2901 
// emit the method prolog: optional stack bang, frame build, simulator
// notification and constant table setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // touch pages below sp up front so a stack overflow is detected
  // here rather than at an arbitrary store inside the method
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2937 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  return 0; // the prolog contains no relocatable values
}
2948 
2949 //=============================================================================
2950 
#ifndef PRODUCT
// debug listing of the epilog; mirrors the frame-teardown and
// polling-page sequence produced by MachEpilogNode::emit below
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // small frames restore lr/rfp with an immediate-offset ldp; large
  // frames route the offset through rscratch1
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
2976 
// emit the method epilog: frame teardown, simulator notification and
// the return-time safepoint poll
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  // read the polling page so a pending safepoint is honoured on return
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
2992 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  // use the generic pipeline description
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3014 
3015 //=============================================================================
3016 
3017 // Figure out which register class each belongs in: rc_int, rc_float or
3018 // rc_stack.
3019 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3020 
3021 static enum RC rc_class(OptoReg::Name reg) {
3022 
3023   if (reg == OptoReg::Bad) {
3024     return rc_bad;
3025   }
3026 
3027   // we have 30 int registers * 2 halves
3028   // (rscratch1 and rscratch2 are omitted)
3029 
3030   if (reg < 60) {
3031     return rc_int;
3032   }
3033 
3034   // we have 32 float register * 2 halves
3035   if (reg < 60 + 128) {
3036     return rc_float;
3037   }
3038 
3039   // Between float regs & stack is the flags regs.
3040   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3041 
3042   return rc_stack;
3043 }
3044 
3045 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3046   Compile* C = ra_->C;
3047 
3048   // Get registers to move.
3049   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3050   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3051   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3052   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3053 
3054   enum RC src_hi_rc = rc_class(src_hi);
3055   enum RC src_lo_rc = rc_class(src_lo);
3056   enum RC dst_hi_rc = rc_class(dst_hi);
3057   enum RC dst_lo_rc = rc_class(dst_lo);
3058 
3059   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3060 
3061   if (src_hi != OptoReg::Bad) {
3062     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3063            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3064            "expected aligned-adjacent pairs");
3065   }
3066 
3067   if (src_lo == dst_lo && src_hi == dst_hi) {
3068     return 0;            // Self copy, no move.
3069   }
3070 
3071   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3072               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3073   int src_offset = ra_->reg2offset(src_lo);
3074   int dst_offset = ra_->reg2offset(dst_lo);
3075 
3076   if (bottom_type()->isa_vect() != NULL) {
3077     uint ireg = ideal_reg();
3078     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3079     if (cbuf) {
3080       MacroAssembler _masm(cbuf);
3081       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3082       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3083         // stack->stack
3084         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3085         if (ireg == Op_VecD) {
3086           __ unspill(rscratch1, true, src_offset);
3087           __ spill(rscratch1, true, dst_offset);
3088         } else {
3089           __ spill_copy128(src_offset, dst_offset);
3090         }
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3092         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3093                ireg == Op_VecD ? __ T8B : __ T16B,
3094                as_FloatRegister(Matcher::_regEncode[src_lo]));
3095       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3096         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3097                        ireg == Op_VecD ? __ D : __ Q,
3098                        ra_->reg2offset(dst_lo));
3099       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3100         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3101                        ireg == Op_VecD ? __ D : __ Q,
3102                        ra_->reg2offset(src_lo));
3103       } else {
3104         ShouldNotReachHere();
3105       }
3106     }
3107   } else if (cbuf) {
3108     MacroAssembler _masm(cbuf);
3109     switch (src_lo_rc) {
3110     case rc_int:
3111       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3112         if (is64) {
3113             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3114                    as_Register(Matcher::_regEncode[src_lo]));
3115         } else {
3116             MacroAssembler _masm(cbuf);
3117             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3118                     as_Register(Matcher::_regEncode[src_lo]));
3119         }
3120       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3121         if (is64) {
3122             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3123                      as_Register(Matcher::_regEncode[src_lo]));
3124         } else {
3125             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3126                      as_Register(Matcher::_regEncode[src_lo]));
3127         }
3128       } else {                    // gpr --> stack spill
3129         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3130         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3131       }
3132       break;
3133     case rc_float:
3134       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3135         if (is64) {
3136             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3137                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3138         } else {
3139             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3140                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3141         }
3142       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3143           if (cbuf) {
3144             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3145                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3146         } else {
3147             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3148                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3149         }
3150       } else {                    // fpr --> stack spill
3151         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3152         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3153                  is64 ? __ D : __ S, dst_offset);
3154       }
3155       break;
3156     case rc_stack:
3157       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3158         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3159       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3160         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3161                    is64 ? __ D : __ S, src_offset);
3162       } else {                    // stack --> stack copy
3163         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3164         __ unspill(rscratch1, is64, src_offset);
3165         __ spill(rscratch1, is64, dst_offset);
3166       }
3167       break;
3168     default:
3169       assert(false, "bad rc_class for spill");
3170       ShouldNotReachHere();
3171     }
3172   }
3173 
3174   if (st) {
3175     st->print("spill ");
3176     if (src_lo_rc == rc_stack) {
3177       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3178     } else {
3179       st->print("%s -> ", Matcher::regName[src_lo]);
3180     }
3181     if (dst_lo_rc == rc_stack) {
3182       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3183     } else {
3184       st->print("%s", Matcher::regName[dst_lo]);
3185     }
3186     if (bottom_type()->isa_vect() != NULL) {
3187       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3188     } else {
3189       st->print("\t# spill size = %d", is64 ? 64:32);
3190     }
3191   }
3192 
3193   return 0;
3194 
3195 }
3196 
3197 #ifndef PRODUCT
3198 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3199   if (!ra_)
3200     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3201   else
3202     implementation(NULL, ra_, false, st);
3203 }
3204 #endif
3205 
// generate the spill copy; no listing stream is needed here
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // variable size; let the shared code measure the emitted output
  return MachNode::size(ra_);
}
3213 
3214 //=============================================================================
3215 
#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  // NOTE(review): this debug string contains a stray ']' and names the
  // stack pointer 'rsp' (x86 style) although emit() below adds from
  // sp — listing only, but worth confirming and cleaning up
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif
3224 
// materialize the stack address of the box lock slot in the allocated
// register with a single add instruction
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // offsets beyond the add-immediate range are not expected here;
    // size() below assumes exactly one 4-byte instruction
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
3242 
3243 //=============================================================================
3244 
#ifndef PRODUCT
// debug listing of the unverified entry point (inline cache check)
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   // NOTE(review): this branch handles the *uncompressed* klass case
   // yet the printed string still says "compressed klass" — listing
   // only, but worth confirming and cleaning up
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
3261 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // inline cache check: compare the receiver klass (loaded from
  // j_rarg0) against the expected klass in rscratch2
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // mismatch: continue in the inline-cache miss stub
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // variable size; let the shared code measure the emitted output
  return MachNode::size(ra_);
}
3280 
3281 // REQUIRED EMIT CODE
3282 
3283 //=============================================================================
3284 
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // stub space exhausted; record the failure so the compile bails out
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  // the handler simply transfers control to the shared exception blob
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  // return the handler's offset within the stub section
  return offset;
}
3304 
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    // stub space exhausted; record the failure so the compile bails out
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // materialize a return address (lr points at this instruction) for
  // the deopt blob, then jump to the unpack entry
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  // return the handler's offset within the stub section
  return offset;
}
3325 
3326 // REQUIRED MATCHER CODE
3327 
3328 //=============================================================================
3329 
3330 const bool Matcher::match_rule_supported(int opcode) {
3331 
3332   switch (opcode) {
3333   case Op_StrIndexOf:
3334     if (CompactStrings)  return false;
3335     break;
3336   default:
3337     break;
3338   }
3339 
3340   if (!has_match_rule(opcode)) {
3341     return false;
3342   }
3343 
3344   return true;  // Per default match rules are supported.
3345 }
3346 
3347 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3348 
3349   // TODO
3350   // identify extra cases that we might want to provide match rules for
3351   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3352   bool ret_value = match_rule_supported(opcode);
3353   // Add rules here.
3354 
3355   return ret_value;  // Per default match rules are supported.
3356 }
3357 
3358 const bool Matcher::has_predicated_vectors(void) {
3359   return false;
3360 }
3361 
const int Matcher::float_pressure(int default_pressure_threshold) {
  // no adjustment to the float register pressure threshold
  return default_pressure_threshold;
}

int Matcher::regnum_to_fpu_offset(int regnum)
{
  // x87-style FPU stack offsets have no equivalent here
  Unimplemented();
  return 0;
}
3371 
3372 // Is this branch offset short enough that a short branch can be used?
3373 //
3374 // NOTE: If the platform does not provide any short branch variants, then
3375 //       this method should return false for offset 0.
3376 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3377   // The passed offset is relative to address of the branch.
3378 
3379   return (-32768 <= offset && offset < 32768);
3380 }
3381 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  // n.b. value is unused: every 64-bit constant is considered simple
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3392 
3393 // Vector width in bytes.
3394 const int Matcher::vector_width_in_bytes(BasicType bt) {
3395   int size = MIN2(16,(int)MaxVectorSize);
3396   // Minimum 2 values in vector
3397   if (size < 2*type2aelembytes(bt)) size = 0;
3398   // But never < 4
3399   if (size < 4) size = 0;
3400   return size;
3401 }
3402 
3403 // Limits on vector size (number of elements) loaded into vector.
3404 const int Matcher::max_vector_size(const BasicType bt) {
3405   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3406 }
3407 const int Matcher::min_vector_size(const BasicType bt) {
3408 //  For the moment limit the vector size to 8 bytes
3409     int size = 8 / type2aelembytes(bt);
3410     if (size < 2) size = 2;
3411     return size;
3412 }
3413 
3414 // Vector ideal reg.
3415 const int Matcher::vector_ideal_reg(int len) {
3416   switch(len) {
3417     case  8: return Op_VecD;
3418     case 16: return Op_VecX;
3419   }
3420   ShouldNotReachHere();
3421   return 0;
3422 }
3423 
3424 const int Matcher::vector_shift_count_ideal_reg(int size) {
3425   return Op_VecX;
3426 }
3427 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// misaligned vector stores/loads are allowed here unless the
// AlignVector flag demands aligned accesses (comment previously said
// "x86" — copy-paste from the x86 ad file)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3437 
// false => size gets scaled to BytesPerLong, ok.
// (Array-init counts are in longs, not bytes.)
const bool Matcher::init_array_count_is_in_bytes = false;
3440 
3441 // Use conditional move (CMOVL)
3442 const int Matcher::long_cmove_cost() {
3443   // long cmoves are no more expensive than int cmoves
3444   return 0;
3445 }
3446 
3447 const int Matcher::float_cmove_cost() {
3448   // float cmoves are no more expensive than int cmoves
3449   return 0;
3450 }
3451 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// (AArch64 shift instructions use only the low bits of the count.)
const bool Matcher::need_masked_shift_count = false;
3458 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Folding the decode into the address only pays off when no shift
  // is needed (zero-based, unshifted compressed oops).
  return Universe::narrow_oop_shift() == 0;
}
3472 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  // Conservatively disabled for now; see narrow_oop_use_complex_address().
  return false;
}
3478 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// (AArch64 handles misaligned doubles directly.)
const bool Matcher::misaligned_doubles_ok = true;
3491 
// Never expected to be called on AArch64 (the stale "No-op on amd64"
// comment was inherited from the x86 port); aborts via Unimplemented().
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3496 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3510 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments travel in r0-r7 and v0-v7; each entry lists both
  // halves (_num and _H_num) of the 64-bit register.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
3536 
// Any register that can carry a Java argument may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
3541 
// No hand-written assembly stub is used for long division by a constant;
// the compiler's generic transformation handles it.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3545 
// Register for DIVI projection of divmodI.
// Fused divmod is not used on AArch64, so this must never be called.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3550 
// Register for MODI projection of divmodI.
// Fused divmod is not used on AArch64, so this must never be called.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3556 
// Register for DIVL projection of divmodL.
// Fused divmod is not used on AArch64, so this must never be called.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3562 
// Register for MODL projection of divmodL.
// Fused divmod is not used on AArch64, so this must never be called.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3568 
// The frame pointer is preserved across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3572 
3573 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3574   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3575     Node* u = addp->fast_out(i);
3576     if (u->is_Mem()) {
3577       int opsize = u->as_Mem()->memory_size();
3578       assert(opsize > 0, "unexpected memory operand size");
3579       if (u->as_Mem()->memory_size() != (1<<shift)) {
3580         return false;
3581       }
3582     }
3583   }
3584   return true;
3585 }
3586 
// The matcher does not require ConvI2L nodes to carry an explicit type.
const bool Matcher::convi2l_type_required = false;
3588 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base+offset addresses are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL x con) with a shift that matches the
  // memory operand size of every use -- clone the whole shifted index
  // into the address expression.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    // If the shifted value is itself (ConvI2L x), fold the conversion
    // into the addressing mode as well (sign-extended index).
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  // Case 2: offset is a bare (ConvI2L x) -- clone just the conversion
  // so the matcher can use a sign-extended register index.
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
3629 
// Transform:
// (AddP base (AddP base address (LShiftL index con)) offset)
// into:
// (AddP base (AddP base offset) (LShiftL index con))
// to take full advantage of AArch64's addressing modes
void Compile::reshape_address(AddPNode* addp) {
  Node *addr = addp->in(AddPNode::Address);
  // Only rewrite when the inner AddP shares our base and its offset is
  // a shifted index (with a shift every memory use can absorb) or a
  // ConvI2L index.
  if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) {
    const AddPNode *addp2 = addr->as_AddP();
    if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL &&
         addp2->in(AddPNode::Offset)->in(2)->is_Con() &&
         size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) ||
        addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) {

      // Any use that can't embed the address computation?
      // (Vector accesses and StoreCM cannot, so bail out.)
      for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
        Node* u = addp->fast_out(i);
        if (!u->is_Mem() || u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) {
          return;
        }
      }
      
      Node* off = addp->in(AddPNode::Offset);
      Node* addr2 = addp2->in(AddPNode::Address);
      Node* base = addp->in(AddPNode::Base);
      
      Node* new_addr = NULL;
      // Check whether the graph already has the new AddP we need
      // before we create one (no GVN available here).
      for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) {
        Node* u = addr2->fast_out(i);
        if (u->is_AddP() &&
            u->in(AddPNode::Base) == base &&
            u->in(AddPNode::Address) == addr2 &&
            u->in(AddPNode::Offset) == off) {
          new_addr = u;
          break;
        }
      }
      
      if (new_addr == NULL) {
        new_addr = new AddPNode(base, addr2, off);
      }
      // Swap the outer AddP's inputs, then disconnect the old inner
      // AddP/offset if this was their last use.
      Node* new_off = addp2->in(AddPNode::Offset);
      addp->set_req(AddPNode::Address, new_addr);
      if (addr->outcnt() == 0) {
        addr->disconnect_inputs(NULL, this);
      }
      addp->set_req(AddPNode::Offset, new_off);
      if (off->outcnt() == 0) {
        off->disconnect_inputs(NULL, this);
      }
    }
  }
}
3685 
3686 // helper for encoding java_to_runtime calls on sim
3687 //
3688 // this is needed to compute the extra arguments required when
3689 // planting a call to the simulator blrt instruction. the TypeFunc
3690 // can be queried to identify the counts for integral, and floating
3691 // arguments and the return type
3692 
3693 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3694 {
3695   int gps = 0;
3696   int fps = 0;
3697   const TypeTuple *domain = tf->domain();
3698   int max = domain->cnt();
3699   for (int i = TypeFunc::Parms; i < max; i++) {
3700     const Type *t = domain->field_at(i);
3701     switch(t->basic_type()) {
3702     case T_FLOAT:
3703     case T_DOUBLE:
3704       fps++;
3705     default:
3706       gps++;
3707     }
3708   }
3709   gpcnt = gps;
3710   fpcnt = fps;
3711   BasicType rt = tf->return_type();
3712   switch (rt) {
3713   case T_VOID:
3714     rtype = MacroAssembler::ret_type_void;
3715     break;
3716   default:
3717     rtype = MacroAssembler::ret_type_integral;
3718     break;
3719   case T_FLOAT:
3720     rtype = MacroAssembler::ret_type_float;
3721     break;
3722   case T_DOUBLE:
3723     rtype = MacroAssembler::ret_type_double;
3724     break;
3725   }
3726 }
3727 
// Emit the volatile (acquire/release) access INSN of REG against a bare
// base register.  Volatile forms only accept [BASE] addressing, so any
// index/scale/displacement is rejected with a guarantee().
// NOTE: deliberately NOT wrapped in do { } while (0) -- the
// MacroAssembler _masm declared here stays in scope so that enc_class
// bodies can follow the macro with further "__" statements (e.g. sxtb).
// Comments must stay above the #define: a // inside would swallow the
// line-continuation backslash.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3736 
// Pointer-to-member types for the MacroAssembler load/store emitters,
// used to parameterize the loadStore() helpers below over the exact
// instruction (integer, FP scalar, or SIMD variant).
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3741 
3742   // Used for all non-volatile memory accesses.  The use of
3743   // $mem->opcode() to discover whether this pattern uses sign-extended
3744   // offsets is something of a kludge.
3745   static void loadStore(MacroAssembler masm, mem_insn insn,
3746                          Register reg, int opcode,
3747                          Register base, int index, int size, int disp)
3748   {
3749     Address::extend scale;
3750 
3751     // Hooboy, this is fugly.  We need a way to communicate to the
3752     // encoder that the index needs to be sign extended, so we have to
3753     // enumerate all the cases.
3754     switch (opcode) {
3755     case INDINDEXSCALEDI2L:
3756     case INDINDEXSCALEDI2LN:
3757     case INDINDEXI2L:
3758     case INDINDEXI2LN:
3759       scale = Address::sxtw(size);
3760       break;
3761     default:
3762       scale = Address::lsl(size);
3763     }
3764 
3765     if (index == -1) {
3766       (masm.*insn)(reg, Address(base, disp));
3767     } else {
3768       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3769       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3770     }
3771   }
3772 
3773   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3774                          FloatRegister reg, int opcode,
3775                          Register base, int index, int size, int disp)
3776   {
3777     Address::extend scale;
3778 
3779     switch (opcode) {
3780     case INDINDEXSCALEDI2L:
3781     case INDINDEXSCALEDI2LN:
3782       scale = Address::sxtw(size);
3783       break;
3784     default:
3785       scale = Address::lsl(size);
3786     }
3787 
3788      if (index == -1) {
3789       (masm.*insn)(reg, Address(base, disp));
3790     } else {
3791       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3792       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3793     }
3794   }
3795 
3796   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3797                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3798                          int opcode, Register base, int index, int size, int disp)
3799   {
3800     if (index == -1) {
3801       (masm.*insn)(reg, T, Address(base, disp));
3802     } else {
3803       assert(disp == 0, "unsupported address mode");
3804       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3805     }
3806   }
3807 
3808 %}
3809 
3810 
3811 
3812 //----------ENCODING BLOCK-----------------------------------------------------
3813 // This block specifies the encoding classes used by the compiler to
3814 // output byte streams.  Encoding classes are parameterized macros
3815 // used by Machine Instruction Nodes in order to generate the bit
3816 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
3820 // which returns its register number when queried.  CONST_INTER causes
3821 // an operand to generate a function which returns the value of the
3822 // constant when queried.  MEMORY_INTER causes an operand to generate
3823 // four functions which return the Base Register, the Index Register,
3824 // the Scale Value, and the Offset Value of the operand when queried.
3825 // COND_INTER causes an operand to generate six functions which return
3826 // the encoding code (ie - encoding bits for the instruction)
3827 // associated with each basic boolean condition for a conditional
3828 // instruction.
3829 //
3830 // Instructions specify two basic values for encoding.  Again, a
3831 // function is available to check if the constant displacement is an
3832 // oop. They use the ins_encode keyword to specify their encoding
3833 // classes (which must be a sequence of enc_class names, and their
3834 // parameters, specified in the encoding block), and they use the
3835 // opcode keyword to specify, in order, their primary, secondary, and
3836 // tertiary opcode.  Only the opcode sections which a particular
3837 // instruction needs for encoding need to be specified.
3838 encode %{
3839   // Build emit functions for each basic byte or larger field in the
3840   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3841   // from C++ code in the enc_class source block.  Emit functions will
3842   // live in the main source block for now.  In future, we can
3843   // generalize this by adding a syntax that specifies the sizes of
3844   // fields in an order, so that the adlc can build the emit functions
3845   // automagically
3846 
  // catch all for unimplemented encodings
  // (emits a debug trap with a diagnostic message instead of real code)
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3852 
  // BEGIN Non-volatile memory access
  //
  // Each enc_class below emits one plain (non-acquire) load via the
  // loadStore() helper, which derives the addressing mode from the
  // memory operand's opcode.  Names follow the AArch64 mnemonics:
  // ldrsb/ldrsh/ldrsw sign-extend, ldrb/ldrh/ldrw zero-extend, and a
  // trailing 'w' in the mnemonic means a 32-bit destination.

  // Load byte, sign-extended to 32 bits (ldrsbw).
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, sign-extended to 64 bits (ldrsb).
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, zero-extended, into an int register (ldrb).
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load byte, zero-extended, into a long register (ldrb).
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, sign-extended to 32 bits (ldrshw).
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, sign-extended to 64 bits (ldrsh).
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, zero-extended, into an int register (ldrh).
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load halfword, zero-extended, into a long register (ldrh).
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word into an int register (ldrw).
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word, zero-extended, into a long register (ldrw).
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word, sign-extended to 64 bits (ldrsw).
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit doubleword (ldr).
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit float (ldrs).
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit double (ldrd).
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit (S) vector chunk.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit (D) vector.
  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 128-bit (Q) vector.
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3956 
  // Non-volatile stores.  The *0 variants store the zero register (zr)
  // directly, avoiding the need to materialize a zero constant.

  // Store byte (strb).
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero byte (strb zr).
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero byte preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store halfword (strh).
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero halfword (strh zr).
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit word (strw).
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero 32-bit word (strw zr).
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 64-bit doubleword (str).
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (r31 encodes sp for some instructions and zr for others),
    // so copy sp through rscratch2 first.
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store zero doubleword (str zr).
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit float (strs).
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 64-bit double (strd).
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32-bit (S) vector chunk.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 64-bit (D) vector.
  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 128-bit (Q) vector.
  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
4051 
  // volatile loads and stores
  //
  // These use the acquire (ldar*) / release (stlr*) instruction forms
  // via MOV_VOLATILE, which only accepts a bare base-register address.
  // Sub-word acquiring loads have no sign-extending form, so the
  // signed variants issue ldarb/ldarh and then sign-extend explicitly.

  // Volatile store byte (stlrb).
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Volatile store halfword (stlrh).
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Volatile store 32-bit word (stlrw).
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Volatile load byte, then sign-extend to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Volatile load byte, then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Volatile load byte, zero-extended, into an int register.
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Volatile load byte, zero-extended, into a long register.
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Volatile load halfword, then sign-extend to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Volatile load halfword, then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Volatile load halfword, zero-extended, into an int register.
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Volatile load halfword, zero-extended, into a long register.
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Volatile load 32-bit word into an int register.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Volatile load 32-bit word, zero-extended, into a long register.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Volatile load 64-bit doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Volatile load float: ldarw into rscratch1, then move to FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile load double: ldar into rscratch1, then move to FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile store 64-bit doubleword (stlr).
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (r31 is context-dependent), so copy sp through rscratch2 first.
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile store float: move to rscratch2, then stlrw.  The inner
  // scope ends before MOV_VOLATILE declares its own _masm.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile store double: move to rscratch2, then stlr.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4178 
4179   // synchronized read/update encodings
4180 
4181   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
4182     MacroAssembler _masm(&cbuf);
4183     Register dst_reg = as_Register($dst$$reg);
4184     Register base = as_Register($mem$$base);
4185     int index = $mem$$index;
4186     int scale = $mem$$scale;
4187     int disp = $mem$$disp;
4188     if (index == -1) {
4189        if (disp != 0) {
4190         __ lea(rscratch1, Address(base, disp));
4191         __ ldaxr(dst_reg, rscratch1);
4192       } else {
4193         // TODO
4194         // should we ever get anything other than this case?
4195         __ ldaxr(dst_reg, base);
4196       }
4197     } else {
4198       Register index_reg = as_Register(index);
4199       if (disp == 0) {
4200         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
4201         __ ldaxr(dst_reg, rscratch1);
4202       } else {
4203         __ lea(rscratch1, Address(base, disp));
4204         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
4205         __ ldaxr(dst_reg, rscratch1);
4206       }
4207     }
4208   %}
4209 
4210   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
4211     MacroAssembler _masm(&cbuf);
4212     Register src_reg = as_Register($src$$reg);
4213     Register base = as_Register($mem$$base);
4214     int index = $mem$$index;
4215     int scale = $mem$$scale;
4216     int disp = $mem$$disp;
4217     if (index == -1) {
4218        if (disp != 0) {
4219         __ lea(rscratch2, Address(base, disp));
4220         __ stlxr(rscratch1, src_reg, rscratch2);
4221       } else {
4222         // TODO
4223         // should we ever get anything other than this case?
4224         __ stlxr(rscratch1, src_reg, base);
4225       }
4226     } else {
4227       Register index_reg = as_Register(index);
4228       if (disp == 0) {
4229         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4230         __ stlxr(rscratch1, src_reg, rscratch2);
4231       } else {
4232         __ lea(rscratch2, Address(base, disp));
4233         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4234         __ stlxr(rscratch1, src_reg, rscratch2);
4235       }
4236     }
4237     __ cmpw(rscratch1, zr);
4238   %}
4239 
  // 64-bit compare-and-swap with release (no acquire) semantics. The
  // matcher only produces base-register addressing for this rule, hence
  // the guarantee on index/displacement.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true);
  %}
4246 
  // 32-bit compare-and-swap with release (no acquire) semantics; same
  // base-register-only addressing constraint as the 64-bit variant.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true);
  %}
4253 
4254 
4255   // The only difference between aarch64_enc_cmpxchg and
4256   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4257   // CompareAndSwap sequence to serve as a barrier on acquiring a
4258   // lock.
  // 64-bit compare-and-swap with BOTH acquire and release semantics
  // (see the comment above: acquire serves as the lock-entry barrier).
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true);
  %}
4265 
  // 32-bit compare-and-swap with both acquire and release semantics.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true);
  %}
4272 
4273 
4274   // auxiliary used for CompareAndSwapX to set result register
4275   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4276     MacroAssembler _masm(&cbuf);
4277     Register res_reg = as_Register($res$$reg);
4278     __ cset(res_reg, Assembler::EQ);
4279   %}
4280 
4281   // prefetch encodings
4282 
4283   enc_class aarch64_enc_prefetchw(memory mem) %{
4284     MacroAssembler _masm(&cbuf);
4285     Register base = as_Register($mem$$base);
4286     int index = $mem$$index;
4287     int scale = $mem$$scale;
4288     int disp = $mem$$disp;
4289     if (index == -1) {
4290       __ prfm(Address(base, disp), PSTL1KEEP);
4291     } else {
4292       Register index_reg = as_Register(index);
4293       if (disp == 0) {
4294         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
4295       } else {
4296         __ lea(rscratch1, Address(base, disp));
4297         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
4298       }
4299     }
4300   %}
4301 
  // mov encodings
4303 
4304   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4305     MacroAssembler _masm(&cbuf);
4306     u_int32_t con = (u_int32_t)$src$$constant;
4307     Register dst_reg = as_Register($dst$$reg);
4308     if (con == 0) {
4309       __ movw(dst_reg, zr);
4310     } else {
4311       __ movw(dst_reg, con);
4312     }
4313   %}
4314 
4315   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4316     MacroAssembler _masm(&cbuf);
4317     Register dst_reg = as_Register($dst$$reg);
4318     u_int64_t con = (u_int64_t)$src$$constant;
4319     if (con == 0) {
4320       __ mov(dst_reg, zr);
4321     } else {
4322       __ mov(dst_reg, con);
4323     }
4324   %}
4325 
  // Materialize a pointer constant. Oop and metadata constants go
  // through the relocation-aware helpers; NULL and (address)1 must be
  // matched by other rules and never reach here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Addresses below the first page are moved directly; everything
        // else is built page-relative via adrp + add of the in-page offset.
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4350 
4351   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4352     MacroAssembler _masm(&cbuf);
4353     Register dst_reg = as_Register($dst$$reg);
4354     __ mov(dst_reg, zr);
4355   %}
4356 
4357   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4358     MacroAssembler _masm(&cbuf);
4359     Register dst_reg = as_Register($dst$$reg);
4360     __ mov(dst_reg, (u_int64_t)1);
4361   %}
4362 
  // Load the safepoint polling page address with a poll_type-relocated
  // adrp; the in-page offset is expected to be zero (asserted below).
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}
4371 
  // Load the byte map base constant via the dedicated MacroAssembler
  // helper (see MacroAssembler::load_byte_map_base).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4376 
  // Materialize a narrow (compressed) oop constant; only oop_type
  // relocations are expected here, NULL is handled by aarch64_enc_mov_n0.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4389 
4390   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4391     MacroAssembler _masm(&cbuf);
4392     Register dst_reg = as_Register($dst$$reg);
4393     __ mov(dst_reg, zr);
4394   %}
4395 
  // Materialize a narrow (compressed) klass constant; only
  // metadata_type relocations are expected here.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4408 
4409   // arithmetic encodings
4410 
4411   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4412     MacroAssembler _masm(&cbuf);
4413     Register dst_reg = as_Register($dst$$reg);
4414     Register src_reg = as_Register($src1$$reg);
4415     int32_t con = (int32_t)$src2$$constant;
4416     // add has primary == 0, subtract has primary == 1
4417     if ($primary) { con = -con; }
4418     if (con < 0) {
4419       __ subw(dst_reg, src_reg, -con);
4420     } else {
4421       __ addw(dst_reg, src_reg, con);
4422     }
4423   %}
4424 
4425   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4426     MacroAssembler _masm(&cbuf);
4427     Register dst_reg = as_Register($dst$$reg);
4428     Register src_reg = as_Register($src1$$reg);
4429     int32_t con = (int32_t)$src2$$constant;
4430     // add has primary == 0, subtract has primary == 1
4431     if ($primary) { con = -con; }
4432     if (con < 0) {
4433       __ sub(dst_reg, src_reg, -con);
4434     } else {
4435       __ add(dst_reg, src_reg, con);
4436     }
4437   %}
4438 
4439   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4440     MacroAssembler _masm(&cbuf);
4441    Register dst_reg = as_Register($dst$$reg);
4442    Register src1_reg = as_Register($src1$$reg);
4443    Register src2_reg = as_Register($src2$$reg);
4444     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4445   %}
4446 
4447   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4448     MacroAssembler _masm(&cbuf);
4449    Register dst_reg = as_Register($dst$$reg);
4450    Register src1_reg = as_Register($src1$$reg);
4451    Register src2_reg = as_Register($src2$$reg);
4452     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4453   %}
4454 
4455   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4456     MacroAssembler _masm(&cbuf);
4457    Register dst_reg = as_Register($dst$$reg);
4458    Register src1_reg = as_Register($src1$$reg);
4459    Register src2_reg = as_Register($src2$$reg);
4460     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4461   %}
4462 
4463   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4464     MacroAssembler _masm(&cbuf);
4465    Register dst_reg = as_Register($dst$$reg);
4466    Register src1_reg = as_Register($src1$$reg);
4467    Register src2_reg = as_Register($src2$$reg);
4468     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4469   %}
4470 
4471   // compare instruction encodings
4472 
4473   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4474     MacroAssembler _masm(&cbuf);
4475     Register reg1 = as_Register($src1$$reg);
4476     Register reg2 = as_Register($src2$$reg);
4477     __ cmpw(reg1, reg2);
4478   %}
4479 
4480   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4481     MacroAssembler _masm(&cbuf);
4482     Register reg = as_Register($src1$$reg);
4483     int32_t val = $src2$$constant;
4484     if (val >= 0) {
4485       __ subsw(zr, reg, val);
4486     } else {
4487       __ addsw(zr, reg, -val);
4488     }
4489   %}
4490 
4491   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4492     MacroAssembler _masm(&cbuf);
4493     Register reg1 = as_Register($src1$$reg);
4494     u_int32_t val = (u_int32_t)$src2$$constant;
4495     __ movw(rscratch1, val);
4496     __ cmpw(reg1, rscratch1);
4497   %}
4498 
4499   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4500     MacroAssembler _masm(&cbuf);
4501     Register reg1 = as_Register($src1$$reg);
4502     Register reg2 = as_Register($src2$$reg);
4503     __ cmp(reg1, reg2);
4504   %}
4505 
  // Compare a long register against an add/sub-encodable immediate
  // using the flag-setting forms with zr as the discarded destination.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      // (val == -val only for the most negative value; -val would
      // overflow, so materialize it and compare register-register)
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4520 
4521   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4522     MacroAssembler _masm(&cbuf);
4523     Register reg1 = as_Register($src1$$reg);
4524     u_int64_t val = (u_int64_t)$src2$$constant;
4525     __ mov(rscratch1, val);
4526     __ cmp(reg1, rscratch1);
4527   %}
4528 
4529   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4530     MacroAssembler _masm(&cbuf);
4531     Register reg1 = as_Register($src1$$reg);
4532     Register reg2 = as_Register($src2$$reg);
4533     __ cmp(reg1, reg2);
4534   %}
4535 
4536   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4537     MacroAssembler _masm(&cbuf);
4538     Register reg1 = as_Register($src1$$reg);
4539     Register reg2 = as_Register($src2$$reg);
4540     __ cmpw(reg1, reg2);
4541   %}
4542 
4543   enc_class aarch64_enc_testp(iRegP src) %{
4544     MacroAssembler _masm(&cbuf);
4545     Register reg = as_Register($src$$reg);
4546     __ cmp(reg, zr);
4547   %}
4548 
4549   enc_class aarch64_enc_testn(iRegN src) %{
4550     MacroAssembler _masm(&cbuf);
4551     Register reg = as_Register($src$$reg);
4552     __ cmpw(reg, zr);
4553   %}
4554 
4555   enc_class aarch64_enc_b(label lbl) %{
4556     MacroAssembler _masm(&cbuf);
4557     Label *L = $lbl$$label;
4558     __ b(*L);
4559   %}
4560 
4561   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4562     MacroAssembler _masm(&cbuf);
4563     Label *L = $lbl$$label;
4564     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4565   %}
4566 
4567   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4568     MacroAssembler _masm(&cbuf);
4569     Label *L = $lbl$$label;
4570     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4571   %}
4572 
  // Slow-path (partial) subtype check via the shared MacroAssembler
  // helper. When $primary is set the result register is zeroed on the
  // fall-through (hit) path; the miss path skips the zeroing.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4590 
  // Java static (or opt-virtual) call. Uses a trampoline call so the
  // target can be out of direct branch range; real Java targets also get
  // a to-interpreter stub. Either emission can fail when the code cache
  // is full, in which case compilation is bailed out via record_failure.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4617 
  // Java dynamic (inline-cache) call. ic_call returns NULL when the
  // code cache is full, in which case compilation is bailed out.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4627 
  // Post-call epilogue. The VerifyStackAtCalls check is not implemented
  // on AArch64 yet, hence call_Unimplemented.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4635 
  // Call from compiled Java code to the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a trampoline call reaches it.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target outside the code cache: indirect call via blrt, with the
      // return address pushed as a frame breadcrumb around the call.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4666 
  // Tail-jump to the rethrow stub via far_jump (stub may be out of
  // direct branch range).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4671 
  // Method return: branch to the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4676 
4677   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4678     MacroAssembler _masm(&cbuf);
4679     Register target_reg = as_Register($jump_target$$reg);
4680     __ br(target_reg);
4681   %}
4682 
4683   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4684     MacroAssembler _masm(&cbuf);
4685     Register target_reg = as_Register($jump_target$$reg);
4686     // exception oop should be in r0
4687     // ret addr has been popped into lr
4688     // callee expects it in r3
4689     __ mov(r3, lr);
4690     __ br(target_reg);
4691   %}
4692 
  // Fast-path monitor enter (C2 FastLock). On exit the flags encode the
  // outcome: EQ => lock acquired, NE => caller must take the runtime
  // slow path. Clobbers tmp, disp_hdr and rscratch1.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is non-null here, so this sets NE and forces the slow path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    // CAS of the mark word: single casal with LSE, else a
    // ldaxr/stlxr retry loop.
    if (UseLSE) {
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      Label retry_load;
      __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4845 
4846   // TODO
4847   // reimplement this with custom cmpxchgptr code
4848   // which avoids some of the unnecessary branching
  // Fast-path monitor exit (C2 FastUnlock). On exit the flags encode
  // the outcome: EQ => unlocked, NE => caller must take the runtime
  // slow path. Clobbers tmp, disp_hdr and rscratch1.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      // CAS the mark word back from box to the displaced header:
      // single casl with LSE, else a ldxr/stlxr retry loop.
      if (UseLSE) {
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        Label retry_load;
        __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4943 
4944 %}
4945 
4946 //----------FRAME--------------------------------------------------------------
4947 // Definition of frame structure and management information.
4948 //
4949 //  S T A C K   L A Y O U T    Allocators stack-slot number
4950 //                             |   (to get allocators register number
4951 //  G  Owned by    |        |  v    add OptoReg::stack0())
4952 //  r   CALLER     |        |
4953 //  o     |        +--------+      pad to even-align allocators stack-slot
4954 //  w     V        |  pad0  |        numbers; owned by CALLER
4955 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4956 //  h     ^        |   in   |  5
4957 //        |        |  args  |  4   Holes in incoming args owned by SELF
4958 //  |     |        |        |  3
4959 //  |     |        +--------+
4960 //  V     |        | old out|      Empty on Intel, window on Sparc
4961 //        |    old |preserve|      Must be even aligned.
4962 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4963 //        |        |   in   |  3   area for Intel ret address
4964 //     Owned by    |preserve|      Empty on Sparc.
4965 //       SELF      +--------+
4966 //        |        |  pad2  |  2   pad to align old SP
4967 //        |        +--------+  1
4968 //        |        | locks  |  0
4969 //        |        +--------+----> OptoReg::stack0(), even aligned
4970 //        |        |  pad1  | 11   pad to align new SP
4971 //        |        +--------+
4972 //        |        |        | 10
4973 //        |        | spills |  9   spills
4974 //        V        |        |  8   (pad0 slot for callee)
4975 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4976 //        ^        |  out   |  7
4977 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4978 //     Owned by    +--------+
4979 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4980 //        |    new |preserve|      Must be even-aligned.
4981 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4982 //        |        |        |
4983 //
4984 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4985 //         known from SELF's arguments and the Java calling convention.
4986 //         Region 6-7 is determined per call site.
4987 // Note 2: If the calling convention leaves holes in the incoming argument
4988 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
4990 //         incoming area, as the Java calling convention is completely under
4991 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4993 //         varargs C calling conventions.
4994 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4995 //         even aligned with pad0 as needed.
4996 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4997 //           (the latter is true on Intel but is it false on AArch64?)
4998 //         region 6-11 is even aligned; it may be padded out more so that
4999 //         the region from SP to FP meets the minimum stack alignment.
5000 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5001 //         alignment.  Region 11, pad1, may be dynamically extended so that
5002 //         SP meets the minimum alignment.
5003 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // First (lo) and second (hi) result registers, indexed by ideal
    // register opcode.  An OptoReg::Bad in hi[] means the value fits
    // in a single register.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5107 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute; default operand cost

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute; default per-insn cost
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5125 
5126 //----------OPERANDS-----------------------------------------------------------
5127 // Operand definitions must precede instruction definitions for correct parsing
5128 // in the ADLC because operands constitute user defined types which are used in
5129 // instruction definitions.
5130 
5131 //----------Simple Operands----------------------------------------------------
5132 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no greater than 4
// (note: the predicate also admits negative values)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5286 
// NOTE(review): despite the immL_ prefix, immL_63 and immL_255 match a
// 32-bit ConI and test get_int() -- presumably they feed long-operand
// instructions whose shift/mask amount arrives as an int constant;
// verify against the instruction rules that use them before changing.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit contiguous low-order bitmask: a value of the form 2^k - 1
// with the top two bits clear (so k <= 62)
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit contiguous low-order bitmask: a value of the form 2^k - 1
// with the top two bits clear (so k <= 30)
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5348 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
// (value range [-2^25, 2^25), i.e. what a 26-bit signed field holds)
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
// (value range [-2^18, 2^18))
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset as a long constant
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5402 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access (size shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access (size shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access (size shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5483 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5505 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (offset of last_Java_pc within the JavaFrameAnchor embedded in JavaThread)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5592 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5674 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value encodable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value encodable as an FP immediate
// (checked after widening to double)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5735 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5766 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5788 
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5800 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  // op_cost(0) added for consistency: every sibling register operand
  // (iRegI, iRegINoSp, iRegL, iRegPNoSp, ...) declares cost 0; without
  // it this operand inherited the default cost 1 from op_attrib
  // op_cost(1), making no-special long registers look more expensive
  // to the matcher than their int/pointer counterparts.
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5809 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5948 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5993 

// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6027 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6107 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6147 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Method Oop Register for interpreter calls
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6189 
//----------Memory Operands----------------------------------------------------

// [reg] -- base register, no index, no displacement
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + (i2l ireg) << scale] -- base plus sign-extended, scaled int index
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + lreg << scale] -- base plus scaled long index
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [reg + (i2l ireg)] -- base plus sign-extended int index, no scaling
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + lreg] -- base plus long index, no scaling
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + off] -- base plus immediate int offset
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- offset valid for 4-byte accesses
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off] -- offset valid for 8-byte accesses
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6305 
6306 operand indOffI16(iRegP reg, immIOffset16 off)
6307 %{
6308   constraint(ALLOC_IN_RC(ptr_reg));
6309   match(AddP reg off);
6310   op_cost(0);
6311   format %{ "[$reg, $off]" %}
6312   interface(MEMORY_INTER) %{
6313     base($reg);
6314     index(0xffffffff);
6315     scale(0x0);
6316     disp($off);
6317   %}
6318 %}
6319 
6320 operand indOffL(iRegP reg, immLoffset off)
6321 %{
6322   constraint(ALLOC_IN_RC(ptr_reg));
6323   match(AddP reg off);
6324   op_cost(0);
6325   format %{ "[$reg, $off]" %}
6326   interface(MEMORY_INTER) %{
6327     base($reg);
6328     index(0xffffffff);
6329     scale(0x0);
6330     disp($off);
6331   %}
6332 %}
6333 
6334 operand indOffL4(iRegP reg, immLoffset4 off)
6335 %{
6336   constraint(ALLOC_IN_RC(ptr_reg));
6337   match(AddP reg off);
6338   op_cost(0);
6339   format %{ "[$reg, $off]" %}
6340   interface(MEMORY_INTER) %{
6341     base($reg);
6342     index(0xffffffff);
6343     scale(0x0);
6344     disp($off);
6345   %}
6346 %}
6347 
6348 operand indOffL8(iRegP reg, immLoffset8 off)
6349 %{
6350   constraint(ALLOC_IN_RC(ptr_reg));
6351   match(AddP reg off);
6352   op_cost(0);
6353   format %{ "[$reg, $off]" %}
6354   interface(MEMORY_INTER) %{
6355     base($reg);
6356     index(0xffffffff);
6357     scale(0x0);
6358     disp($off);
6359   %}
6360 %}
6361 
6362 operand indOffL16(iRegP reg, immLoffset16 off)
6363 %{
6364   constraint(ALLOC_IN_RC(ptr_reg));
6365   match(AddP reg off);
6366   op_cost(0);
6367   format %{ "[$reg, $off]" %}
6368   interface(MEMORY_INTER) %{
6369     base($reg);
6370     index(0xffffffff);
6371     scale(0x0);
6372     disp($off);
6373   %}
6374 %}
6375 
// Narrow-oop variants of the memory operands above.  They match a
// DecodeN of a compressed pointer as the base, and are only legal when
// narrow_oop_shift() == 0, i.e. when the compressed base register can
// be used directly as an address without re-scaling.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6480 


// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// The base is constrained to the thread register; the displacement is
// the fixed offset of the anchor's pc field (immL_pc_off).
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6497 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
//
// All five variants below (P/I/F/D/L) are identical apart from the
// stack-slot register type: base 0x1e addresses relative to RSP and the
// slot number itself supplies the displacement.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6572 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// The hex values are the AArch64 condition-code encodings for the
// mnemonics shown (eq=0, ne=1, ..., lt=0xb, gt=0xc, le=0xd).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// Same structure as cmpOp but mapped onto the unsigned condition codes
// (lo/hs/ls/hi) instead of the signed ones.

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6628 
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Memory operand classes restricted to addressing modes legal for
// vector loads/stores of 4, 8 and 16 bytes respectively (offset must
// fit the access size, hence the indOff*4/8/16 variants).
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6645 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6673 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the symbolic A53 stage names onto the generic S0..S5 stages
// declared by pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3

// Integer ALU reg operation
pipeline %{

attributes %{
  // ARM instructions are of fixed length
  // NOTE(review): the original trailing comment here was truncated
  // ("TODO does"); intent unclear -- confirm before relying on it.
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6699 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 / ALU are composite resources: an instruction needing INS01
// may issue in either slot, one needing INS0 only in slot 0.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
6720 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Scalar FP classes: operands read in S1/S2, result written in S5,
// issued via either slot (INS01) and occupying the NEON/FP unit.

// FP two-operand (dyadic) op, single precision
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-operand (dyadic) op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-operand (unary) op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-operand (unary) op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> float conversion
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> double conversion
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> int conversion
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> long conversion
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> FP float conversion
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> FP float conversion
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> int conversion
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> long conversion
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> FP double conversion
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> FP double conversion
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6852 
// FP divide, single precision.  Issues only in slot 0 (INS0).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads the flags register
// plus both sources in S1, writes in S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision (no source operands)
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load from constant pool, single precision
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load from constant pool, double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6926 
// Vector (NEON) classes.  Convention throughout: the 64-bit (vecD)
// variants dual-issue in either slot (INS01); the 128-bit (vecX)
// variants can only issue in slot 0 (INS0).

// Vector multiply, 64-bit
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit: dst is both read (accumulator
// input, S1) and written (S5).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dyadic op (add/sub etc.), 64-bit
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op (and/or/eor), 64-bit
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
7008 
// Vector shift by register, 64-bit
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit (the immediate needs no
// pipeline-stage annotation)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector FP dyadic op (add/sub), 64-bit
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit.  Slot 0 only, unlike the other
// 64-bit classes.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7113 
// Vector duplicate from general register, 64-bit
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from general register, 128-bit
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from float register, 64-bit
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from float register, 128-bit
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector duplicate from double register, 128-bit
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move (MOVI), 64-bit
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector immediate move (MOVI), 128-bit
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 128-bit
// NOTE(review): src is declared vecD although the name and the vmem16
// memory class suggest a 128-bit (vecX) source -- confirm against the
// instructions that reference this class.
pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7210 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is booked
// in EX1, unlike the other EX2-writing classes -- confirm intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7308 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7335 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7373 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7426 
7427 //------- Divide pipeline operations --------------------
7428 
7429 // Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7440 
7441 // Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7452 
7453 //------- Load pipeline operations ------------------------
7454 
7455 // Load - prefetch
7456 // Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  // Prefetch only reads the address; no destination register is written.
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7464 
7465 // Load - reg, mem
7466 // Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  // Address read at issue, loaded value written back via the load/store unit.
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7475 
7476 // Load - reg, reg
7477 // Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  // Register-offset load: index register read at issue.
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7486 
7487 //------- Store pipeline operations -----------------------
7488 
7489 // Store - zr, mem
7490 // Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  // Store of the zero register: only the address operand is read.
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7498 
7499 // Store - reg, mem
7500 // Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  // Address is needed at issue; the stored data is not needed until EX2.
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7509 
7510 // Store - reg, reg
7511 // Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  // Register-offset store: index read at issue, data read at EX2.
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7520 
//------- Branch pipeline operations ----------------------
7522 
7523 // Branch
pipe_class pipe_branch()
%{
  single_instruction;
  // Unconditional branch: uses the branch unit only, no operands.
  INS01  : ISS;
  BRANCH : EX1;
%}
7530 
7531 // Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  // Conditional branch additionally reads the flags register at EX1.
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7539 
7540 // Compare & Branch
7541 // EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  // Compare-and-branch reads its register operand instead of the flags.
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7549 
7550 //------- Synchronisation operations ----------------------
7551 
7552 // Any operation requiring serialization.
7553 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  // Serializing op: nothing may be scheduled around it.
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7562 
7563 // Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  // Catch-all for multi-instruction expansions; treated as 10 instructions.
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7573 
7574 // Empty pipeline class
pipe_class pipe_class_empty()
%{
  // Zero-latency placeholder; used for MachNop (see the define block below this section).
  single_instruction;
  fixed_latency(0);
%}
7580 
7581 // Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}
7587 
7588 // Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}
7594 
7595 // Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}
7601 
7602 // Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  // Calls are modeled with a large fixed latency.
  fixed_latency(100);
%}
7608 
7609 // Define the class for the Nop node.
define %{
   // A machine nop consumes no pipeline resources.
   MachNop = pipe_class_empty;
%}
7613 
7614 %}
7615 //----------INSTRUCTIONS-------------------------------------------------------
7616 //
7617 // match      -- States which machine-independent subtree may be replaced
7618 //               by this instruction.
7619 // ins_cost   -- The estimated cost of this instruction is used by instruction
7620 //               selection to identify a minimum cost tree of machine
7621 //               instructions that matches a tree of machine-independent
7622 //               instructions.
7623 // format     -- A string providing the disassembly for this instruction.
7624 //               The value of an instruction's operand may be inserted
7625 //               by referring to it with a '$' prefix.
7626 // opcode     -- Three instruction opcodes may be provided.  These are referred
7627 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7629 //               indicate the type of machine instruction, while secondary
7630 //               and tertiary are often used for prefix options or addressing
7631 //               modes.
7632 // ins_encode -- A list of encode classes with parameters. The encode class
7633 //               name must have been defined in an 'enc_class' specification
7634 //               in the encode section of the architecture description.
7635 
7636 // ============================================================================
7637 // Memory (Load/Store) Instructions
7638 
7639 // Load Instructions
7640 
7641 // Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  // Plain (non-acquiring) loads only; acquiring loads match loadB_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7654 
7655 // Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n is the ConvI2L node; the load being tested is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7668 
7669 // Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  // Plain loads only; acquiring loads match loadUB_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7682 
7683 // Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  // n is the ConvI2L node; the load being tested is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7696 
7697 // Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  // Plain loads only; acquiring loads match loadS_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7710 
7711 // Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  // n is the ConvI2L node; the load being tested is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7724 
7725 // Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  // Plain loads only; acquiring loads match loadUS_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7738 
7739 // Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  // n is the ConvI2L node; the load being tested is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7752 
7753 // Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  // Plain loads only; acquiring loads match loadI_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7766 
7767 // Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  // n is the ConvI2L node; the load being tested is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrsw sign-extends the 32-bit value into the 64-bit destination.
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7780 
7781 // Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  // (int-load then zero-extend) == 32-bit ldrw, which zero-extends for free.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // Walk AndL -> ConvI2L -> LoadI to test the underlying load node.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7794 
7795 // Load Long (64 bit signed)
// Load Long (64 bit signed): plain loads only; acquiring loads match loadL_volatile.
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed: disassembly comment said "# int" for a 64-bit load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7808 
7809 // Load Range
instruct loadRange(iRegINoSp dst, memory mem)
%{
  // Array-length load; no acquire variant exists, so no predicate is needed.
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7821 
7822 // Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  // Plain loads only; acquiring loads match loadP_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7835 
7836 // Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  // Plain loads only; acquiring loads match loadN_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7849 
7850 // Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7863 
7864 // Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7877 
7878 // Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  // Plain loads only; acquiring loads match loadF_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7891 
7892 // Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  // Plain loads only; acquiring loads match loadD_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7905 
7906 
7907 // Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
7919 
7920 // Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
7932 
7933 // Load Pointer Constant
7934 
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // May expand to multiple mov/movk instructions, hence the higher cost.
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
7948 
7949 // Load Null Pointer Constant
7950 
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7962 
7963 // Load Pointer Constant One
7964 
// Loads the pointer constant one (immP_1), used as a sentinel value.
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed: format comment was copy-pasted from loadConP0 and wrongly said "NULL ptr".
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7976 
7977 // Load Poll Page Constant
7978 
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Materialized with a pc-relative adr, not a mov.
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}
7990 
7991 // Load Byte Map Base Constant
7992 
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Card-table byte map base, materialized with a pc-relative adr.
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
8004 
8005 // Load Narrow Pointer Constant
8006 
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}
8018 
8019 // Load Narrow Null Pointer Constant
8020 
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}
8032 
8033 // Load Narrow Klass Constant
8034 
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8046 
8047 // Load Packed Float Constant
8048 
// Float constants encodable in fmov's 8-bit packed immediate form.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}
8059 
8060 // Load Float Constant
8061 
// General float constants: loaded from the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
8077 
8078 // Load Packed Double Constant
8079 
// Double constants encodable in fmov's 8-bit packed immediate form.
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8090 
8091 // Load Double Constant
8092 
// General double constants: loaded from the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    // Fixed: disassembly comment said "float=$con" for a double constant.
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8107 
8108 // Store Instructions
8109 
8110 // Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  // Only when no StoreStore barrier is required; otherwise storeimmCM0_ordered matches.
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8123 
8124 // Store CMS card-mark Immediate with intervening StoreStore
8125 // needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  // No predicate: this is the fallback when storeimmCM0's predicate fails.
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
8138 
8139 // Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  // Plain stores only; releasing stores match storeB_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8152 
8153 
// Store Byte zero: encoded with the zero register, saving a constant load.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: format said "strb rscractch2" (misspelled, and aarch64_enc_strb0
  // stores zr, as the matching storeimmCM0 format above documents).
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8166 
8167 // Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  // Plain stores only; releasing stores match storeC_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}
8180 
// Store Char/Short zero: encoded with the zero register.
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
8193 
8194 // Store Integer
8195 
instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  // Plain stores only; releasing stores match storeI_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8208 
// Store Integer zero: encoded with the zero register.
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8221 
8222 // Store Long (64 bit signed)
// Store Long (64 bit): plain stores only; releasing stores match storeL_volatile.
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: disassembly comment said "# int" for a 64-bit store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
8235 
8236 // Store Long (64 bit signed)
// Store Long zero: encoded with the zero register.
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: disassembly comment said "# int" for a 64-bit store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8249 
8250 // Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  // Plain stores only; releasing stores match storeP_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
8263 
8264 // Store Pointer
// Store NULL pointer: encoded with the zero register.
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8277 
8278 // Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  // Plain stores only; releasing stores match storeN_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8291 
// Store compressed NULL by reusing rheapbase, which holds zero when both
// the narrow oop base and narrow klass base are NULL.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8306 
8307 // Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  // Plain stores only; releasing stores match storeF_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8320 
8321 // TODO
8322 // implement storeImmF0 and storeFImmPacked
8323 
8324 // Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  // Plain stores only; releasing stores match storeD_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8337 
8338 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // Plain stores only; note this rule lists predicate before match,
  // unlike its siblings — the order is not significant to ADLC.
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8351 
8352 // TODO
8353 // implement storeImmD0 and storeDImmPacked
8354 
8355 // prefetch instructions
8356 // Must be safe to execute with invalid address (cannot fault).
8357 
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8368 
8369 //  ---------------- volatile loads and stores ----------------
8370 
8371 // Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // No predicate: the plain-load rules exclude acquiring loads, so this
  // matches the remaining (volatile) cases via its higher cost / ldar encoding.
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8383 
8384 // Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8396 
8397 // Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8409 
8410 // Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8422 
8423 // Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
8435 
// Load Char (16 bit unsigned) with acquire semantics.
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
8447 
8448 // Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8460 
// Load Short (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: format said "ldarh" but the encoding emits the sign-extending
  // ldarsh (aarch64_enc_ldarsh); the disassembly string now matches.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8473 
8474 // Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8486 
8487 // Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  // 32-bit ldarw zero-extends, so the AndL mask comes for free.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8499 
8500 // Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: disassembly comment said "# int" for a 64-bit load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8512 
8513 // Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8525 
8526 // Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8538 
8539 // Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
8551 
8552 // Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8564 
8565 // Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // No predicate: the plain-store rules exclude releasing stores, so this
  // matches the remaining (volatile) cases via its stlr encoding.
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
8577 
8578 // Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
8590 
8591 // Store Integer
8592 
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8604 
8605 // Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: disassembly comment said "# int" for a 64-bit store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8617 
8618 // Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8630 
8631 // Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8643 
8644 // Store Float
8645 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
8646 %{
8647   match(Set mem (StoreF mem src));
8648 
8649   ins_cost(VOLATILE_REF_COST);
8650   format %{ "stlrs  $src, $mem\t# float" %}
8651 
8652   ins_encode( aarch64_enc_fstlrs(src, mem) );
8653 
8654   ins_pipe(pipe_class_memory);
8655 %}
8656 
8657 // TODO
8658 // implement storeImmF0 and storeFImmPacked
8659 
8660 // Store Double
8661 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
8662 %{
8663   match(Set mem (StoreD mem src));
8664 
8665   ins_cost(VOLATILE_REF_COST);
8666   format %{ "stlrd  $src, $mem\t# double" %}
8667 
8668   ins_encode( aarch64_enc_fstlrd(src, mem) );
8669 
8670   ins_pipe(pipe_class_memory);
8671 %}
8672 
8673 //  ---------------- end of volatile loads and stores ----------------
8674 
8675 // ============================================================================
8676 // BSWAP Instructions
8677 
// Reverse the bytes of a 32-bit value (rev32 form, "revw" in the local
// assembler naming).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a 64-bit value.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of an unsigned short: rev16w swaps each byte pair;
// no extension is needed because the result is treated as unsigned.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a signed short: after the byte swap, sbfmw with
// bit range 0..15 sign-extends the low halfword into the full 32 bits.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8731 
8732 // ============================================================================
8733 // Zero Count Instructions
8734 
// Count leading zeros of a 32-bit value: single clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit value.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros (32-bit): AArch64 has no ctz instruction, so
// bit-reverse with rbitw and then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros (64-bit): same rbit + clz idiom as the int form.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8786 
8787 //---------- Population Count Instructions -------------------------------------
8788 //
8789 
// Population count (int). AArch64 has no scalar popcount; the value is
// moved into an FP/SIMD register, cnt counts bits per byte lane, and addv
// sums the lanes. A vector TEMP register is required.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of an int loaded from memory: loading straight into the
// vector register with ldrs avoids the GPR round-trip (and the zero-extend
// movw used in the register form).
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
// Population count (long): same cnt/addv idiom, 64-bit source.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of a long loaded from memory; ldrd loads the 64-bit
// value directly into the vector temp.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8876 
8877 // ============================================================================
8878 // MemBar Instruction
8879 
// LoadFence: orders subsequent loads and stores after preceding loads
// (LoadLoad|LoadStore barrier).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire elided: the unnecessary_acquire() analysis has determined
// the preceding load already provides acquire semantics (e.g. ldar), so
// only a block comment is emitted.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// MemBarAcquire: emitted as a LoadLoad|LoadStore barrier.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock: always elided here -- only a block comment is
// emitted. NOTE(review): presumably the monitor-enter code sequence
// already provides the required ordering; confirm against the lock
// expansion code.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders the following store after preceding loads and stores
// (LoadStore|StoreStore barrier).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease elided when unnecessary_release() shows the following
// store already has release semantics (e.g. stlr).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease: emitted as a LoadStore|StoreStore barrier.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarStoreStore: plain store-store ordering barrier.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock: always elided, mirroring membar_acquire_lock above.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile elided when unnecessary_volatile() shows the surrounding
// acquire/release instruction forms make the full barrier redundant.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile: StoreLoad barrier, the most expensive ordering -- the
// inflated cost (x100) strongly biases the matcher toward the elided form.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
9024 
9025 // ============================================================================
9026 // Cast/Convert Instructions
9027 
// Reinterpret a long as a pointer; just a register move, skipped entirely
// when source and destination registers coincide.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; mirror of castX2P.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
9057 
// Convert oop into int for vectors alignment masking
// movw truncates the 64-bit pointer to its low 32 bits (and zeroes the
// upper half of dst).
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9070 
9071 // Convert compressed oop into int for vectors alignment masking
9072 // in case of 32bit oops (heap < 4Gb).
// Compressed oop -> int: valid only when narrow oops are unshifted
// (heap < 4Gb), so the 32-bit narrow-oop bit pattern is usable directly.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format string: was "mov dst, $src" -- missing the '$' on dst
  // (so the operand was not substituted in disassembly output) and showing
  // "mov" although the encoding emits a 32-bit movw.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9086 
9087 
9088 // Convert oop pointer into compressed form
// Compress an oop that may be NULL; encode_heap_oop handles the NULL case,
// and the flags register is killed as a side effect.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-NULL (cheaper: no NULL check).
// NOTE(review): cr is declared but has no effect() clause here -- confirm
// whether it is needed or can be dropped.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be NULL.
// NOTE(review): cr declared without an effect() clause -- see note above.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known non-NULL (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9141 
9142 // n.b. AArch64 implementations of encode_klass_not_null and
9143 // decode_klass_not_null do not modify the flags register so, unlike
9144 // Intel, we don't kill CR as a side effect here
9145 
// Compress a klass pointer (never NULL); per the note above, the AArch64
// helper does not clobber flags, so no KILL cr effect is declared.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer; the single-register helper overload
// is used for the in-place (dst == src) case.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9179 
// Type-system-only casts: all three rules rewrite a node in place on the
// same register and emit no machine code (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9210 
9211 // ============================================================================
9212 // Atomic operation instructions
9213 //
9214 // Intel and SPARC both implement Ideal Node LoadPLocked and
9215 // Store{PIL}Conditional instructions using a normal load for the
9216 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9217 //
9218 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9219 // pair to lock object allocations from Eden space when not using
9220 // TLABs.
9221 //
9222 // There does not appear to be a Load{IL}Locked Ideal Node and the
9223 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9224 // and to use StoreIConditional only for 32-bit and StoreLConditional
9225 // only for 64-bit.
9226 //
9227 // We implement LoadPLocked and StorePLocked instructions using,
9228 // respectively the AArch64 hw load-exclusive and store-conditional
9229 // instructions. Whereas we must implement each of
9230 // Store{IL}Conditional using a CAS which employs a pair of
9231 // instructions comprising a load-exclusive followed by a
9232 // store-conditional.
9233 
9234 
9235 // Locked-load (linked load) of the current heap-top
9236 // used when updating the eden heap top
9237 // implemented using ldaxr on AArch64
9238 
// Linked (exclusive) load of the heap top with acquire semantics (ldaxr);
// pairs with the stlxr in storePConditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9251 
9252 // Conditional-store of the updated heap-top.
9253 // Used during allocation of the shared heap.
9254 // Sets flag (EQ) on success.
9255 // implemented using stlxr on AArch64.
9256 
// Store-conditional (stlxr, release form) of the updated heap top; the
// encoding compares the status register against zr so EQ is set on success.
// NOTE(review): oldval is declared but only newval/heap_top_ptr are passed
// to the encoding -- the old value is implicit in the preceding loadPLocked.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9276 
9277 
9278 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9279 // when attempting to rebias a lock towards the current thread.  We
9280 // must use the acquire form of cmpxchg in order to guarantee acquire
9281 // semantics in this case.
// StoreLConditional: implemented as an acquiring CAS (see the rebias
// comment above); success is reported via EQ in the flags register.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// StoreIConditional: 32-bit variant, acquiring CAS to match the long form.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9316 
9317 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9318 // can't match them
9319 
9320 // standard CompareAndSwapX when we are using barriers
9321 // these have higher priority than the rules selected by a predicate
9322 
// CompareAndSwapI (barrier-using form): CAS the 32-bit value, then cset
// materializes the boolean success result from the EQ flag.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapL: 64-bit CAS plus cset of the success flag.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapP: pointer-width (64-bit) CAS.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapN: narrow-oop (32-bit) CAS.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9394 
9395 // alternative CompareAndSwapX when we are eliding barriers
9396 
// Acquiring-CAS variants: selected when needs_acquiring_load_exclusive(n)
// holds, i.e. the surrounding barriers were elided, so the CAS itself must
// use the acquire form. Lower cost than the plain rules gives these
// priority when the predicate matches.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9472 
9473 
// Atomic exchange rules: prev receives the previous memory contents,
// newv is stored; word forms (xchgw) for int/narrow oop, doubleword
// forms for long/pointer.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9509 
9510 
// Atomic fetch-and-add rules. Four variants per width: register or
// immediate increment, crossed with result-used or result-unused
// (_no_res, matched when result_not_used() holds; passing noreg skips
// materializing the old value and the slightly lower cost prefers it).
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9594 
9595 // Manifest a CmpL result in an integer register.
9596 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way long compare to -1/0/1: cmp sets flags, csetw writes 1 when
// NE (0 when EQ), cnegw negates that when LT, yielding the signum.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate form: a negative add/sub immediate is folded by emitting the
// opposite operation with the negated constant (adds with -con), since
// the AArch64 add/sub immediate field is unsigned.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9642 
9643 // ============================================================================
9644 // Conditional Move Instructions
9645 
9646 // n.b. we have identical rules for both a signed compare op (cmpOp)
9647 // and an unsigned compare op (cmpOpU). it would be nice if we could
9648 // define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
9654 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9655 
// CMoveI, signed compare. Note the operand order: $src2 is passed to
// cselw before $src1, matching the format string.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As above, for an unsigned compare (cmpOpU / rFlagsRegU).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9687 
9688 // special cases where one arg is zero
9689 
9690 // n.b. this is selected in preference to the rule above because it
9691 // avoids loading constant 0 into a source register
9692 
9693 // TODO
9694 // we ought only to be able to cull one of these variants as the ideal
9695 // transforms ought always to order the zero consistently (to left/right?)
9696 
// CMoveI with constant 0 as the first value: use zr directly instead
// of materializing 0 in a register. Signed compare.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, for an unsigned compare.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9728 
// CMoveI with constant 0 as the second value (zr in the first cselw
// value slot). Signed compare.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, for an unsigned compare.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9760 
9761 // special case for creating a boolean 0 or 1
9762 
9763 // n.b. this is selected in preference to the rule above because it
9764 // avoids loading constants 0 and 1 into a source register
9765 
// CMoveI selecting between constants 1 and 0: a single csincw on zr
// produces the boolean without loading either constant. Signed compare.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// As above, for an unsigned compare.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9803 
// CMoveL, signed compare: 64-bit csel, operands swapped as in the
// int rules above.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As above, for an unsigned compare.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9835 
9836 // special cases where one arg is zero
9837 
// CMoveL with constant 0 as the second value: zr replaces the
// materialized constant. Signed compare.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, for an unsigned compare.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9869 
// CMoveL with constant 0 as the first value. Signed compare.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, for an unsigned compare.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9901 
// CMoveP (pointer conditional move), signed compare.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As above, for an unsigned compare.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9933 
9934 // special cases where one arg is zero
9935 
// CMoveP with null (immP0) as the second value: zr stands in for the
// null pointer. Signed compare.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, for an unsigned compare.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9967 
// CMoveP with null (immP0) as the first value. Signed compare.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, for an unsigned compare.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9999 
// CMoveN (compressed-pointer conditional move), signed compare;
// 32-bit cselw since narrow oops fit in a w-register.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10015 
// CMoveN (compressed-pointer conditional move), unsigned compare.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // Fixed: this is the cmpOpU/rFlagsRegU (unsigned) variant, so the
  // disassembly comment now says "unsigned" (it previously said
  // "signed", matching its signed sibling by copy-paste).
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10031 
10032 // special cases where one arg is zero
10033 
// CMoveN with narrow null (immN0) as the second value. Signed compare.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, for an unsigned compare.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10065 
// CMoveN with narrow null (immN0) as the first value. Signed compare.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, for an unsigned compare.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10097 
// CMoveF (float conditional move) via fcsels; note src2/src1 swapped
// into the fcsels operand slots, as in the integer rules.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// As above, for an unsigned compare.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10133 
// CMoveD (double conditional move) via fcseld, signed compare.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed: this rule matches CMoveD, so the disassembly comment now
  // says "double" instead of the copy-pasted "float".
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10151 
// CMoveD (double conditional move) via fcseld, unsigned compare.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed: this rule matches CMoveD, so the disassembly comment now
  // says "double" instead of the copy-pasted "float".
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10169 
10170 // ============================================================================
10171 // Arithmetic Instructions
10172 //
10173 
10174 // Integer Addition
10175 
10176 // TODO
10177 // these currently employ operations which do not set CR and hence are
10178 // not flagged as killing CR but we would like to isolate the cases
10179 // where we want to set flags from those where we don't. need to work
10180 // out how to do that.
10181 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above, where src1 arrives through a ConvL2I (the long register's
// low word is used directly).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10224 
10225 // Pointer Addition
// Pointer + long offset, register + register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + sign-extended int offset: folds the ConvI2L into the add's
// sxtw extend.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + (long offset << scale): folds the shift into the lea
// address mode.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer + (sign-extended int offset << scale): folds both the
// ConvI2L and the shift into the lea's sxtw address mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (ConvI2L src) << scale as a single sbfiz: insert the (at most 32-bit
// significant) int into the long at bit position scale.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    // Width is capped at 32 because only the int's 32 bits are
    // significant; (-scale) & 63 is the remaining room above the lsb.
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10300 
10301 // Pointer Immediate Addition
10302 // n.b. this needs to be more expensive than using an indirect memory
10303 // operand
// Pointer + add/sub-encodable immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10334 
// Long Immediate Addition. No constant pool entries required.
// 64-bit add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10349 
10350 // Integer Subtraction
// 32-bit integer subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10397 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit subtract, register - add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: the format string was "sub$dst, ..." — missing the space
  // between the mnemonic and its first operand in the debug output.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10412 
10413 // Integer Negation (special case for sub)
10414 
// 32-bit negate: dst = 0 - src (matched from SubI with a zero left
// operand).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10428 
10429 // Long Negation
10430 
// 64-bit negate: dst = 0 - src (matched from SubL with a zero left
// operand).
// Fixed: the source of a SubL is a long value, so the operand must be
// the long register class iRegL — the previous int-typed iRegIorL2I
// could not correctly type the long source.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10444 
10445 // Integer Multiply
10446 
// 32-bit multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64 signed multiply: MulL of two ConvI2L inputs collapses to
// a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of a signed 64x64 multiply (MulHiL) via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10509 
10510 // Combined Integer Multiply & Add/Sub
10511 
// Combined 32-bit multiply-add: dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format now names the 32-bit form (maddw) that the encoding
  // actually emits, rather than the 64-bit "madd".
  format %{ "maddw $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10527 
// Combined 32-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format now names the 32-bit form (msubw) that the encoding
  // actually emits, rather than the 64-bit "msub".
  format %{ "msubw $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10543 
10544 // Combined Long Multiply & Add/Sub
10545 
// Combined 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Combined 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10577 
10578 // Integer Divide
10579 
// 32-bit signed divide.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (src >> 31) >>> 31: extract the sign bit with a single lsrw.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + ((src >> 31) >>> 31): add the sign bit in one shifted addw
// (rounding adjustment used by divide-by-power-of-two sequences).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10613 
10614 // Long Divide
10615 
// Signed 64-bit divide; emission is delegated to the shared
// aarch64_enc_div encoding class.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10625 
// 64-bit analogue of signExtract: (src1 >> 63) >>> 63 collapses to a
// single logical shift right by 63 (sign-bit extraction).
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10635 
// Rounding step for signed 64-bit divide-by-2: dst = src + (src >>> 63),
// i.e. add the sign bit, done with add and an LSR-shifted second operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Include "LSR" in the debug format for consistency with div2Round;
  // the old string omitted the shift annotation and misrepresented the
  // emitted instruction.
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    // Second and third operands are both $src: src + (src LSR 63).
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10649 
10650 // Integer Remainder
10651 
// Integer remainder: AArch64 has no hardware rem, so compute
// dst = src1 - (src1 / src2) * src2 with sdivw + msubw; rscratch1
// holds the transient quotient (see aarch64_enc_modw).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed debug format: the old string had a stray unbalanced "(" and
  // did not read as assembly ("msubw($dst, ...").
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10662 
10663 // Long Remainder
10664 
// Long remainder: dst = src1 - (src1 / src2) * src2 with sdiv + msub;
// rscratch1 holds the transient quotient (see aarch64_enc_mod).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed debug format: removed the stray unbalanced "(" and use "\n\t"
  // for the continuation line, matching modI above.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10675 
10676 // Integer Shifts
10677 
10678 // Shift Left Register
// 32-bit shift left by a register count (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10693 
10694 // Shift Left Immediate
// 32-bit shift left by an immediate; count is masked to 5 bits (& 0x1f).
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10709 
10710 // Shift Right Logical Register
// 32-bit logical shift right by a register count (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10725 
10726 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; count masked to 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10741 
10742 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right by a register count (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10757 
10758 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; count masked to 5 bits.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10773 
10774 // Combined Int Mask and Right Shift (using UBFM)
10775 // TODO
10776 
10777 // Long Shifts
10778 
10779 // Shift Left Register
// 64-bit shift left by a register count (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10794 
10795 // Shift Left Immediate
// 64-bit shift left by an immediate; count is masked to 6 bits (& 0x3f).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10810 
10811 // Shift Right Logical Register
// 64-bit logical shift right by a register count (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10826 
10827 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate; count masked to 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10842 
10843 // A special-case pattern for card table stores.
// A special-case pattern for card table stores: logical right shift of a
// pointer reinterpreted as a long (CastP2X); count masked to 6 bits.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10858 
10859 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by a register count (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10874 
10875 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate; count masked to 6 bits.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10890 
10891 // BEGIN This section of the file is automatically generated. Do not edit --------------
10892 
// Generated: dst = ~src1 (XorL src1 with -1), emitted as eon with zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// Generated: dst = ~src1 (32-bit XorI with -1), emitted as eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10925 
// Generated: dst = src1 & ~src2, fused into a single bicw.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10942 
// Generated: dst = src1 & ~src2 (64-bit), fused into a single bic.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10959 
// Generated: dst = src1 | ~src2, fused into a single ornw.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10976 
// Generated: dst = src1 | ~src2 (64-bit), fused into a single orn.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10993 
// Generated: dst = ~(src1 ^ src2) (matched as -1 ^ (src2 ^ src1)), one eonw.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11010 
// Generated: dst = ~(src1 ^ src2) (64-bit), one eon.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11027 
// Generated: dst = src1 & ~(src2 >>> src3), bicw with LSR-shifted operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11045 
// Generated: dst = src1 & ~(src2 >>> src3) (64-bit), bic with LSR operand.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11063 
// Generated: dst = src1 & ~(src2 >> src3), bicw with ASR-shifted operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11081 
// Generated: dst = src1 & ~(src2 >> src3) (64-bit), bic with ASR operand.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11099 
// Generated: dst = src1 & ~(src2 << src3), bicw with LSL-shifted operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11117 
// Generated: dst = src1 & ~(src2 << src3) (64-bit), bic with LSL operand.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11135 
// Generated: dst = ~(src1 ^ (src2 >>> src3)), eonw with LSR-shifted operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11153 
// Generated: dst = ~(src1 ^ (src2 >>> src3)) (64-bit), eon with LSR operand.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11171 
// Generated: dst = ~(src1 ^ (src2 >> src3)), eonw with ASR-shifted operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11189 
// Generated: dst = ~(src1 ^ (src2 >> src3)) (64-bit), eon with ASR operand.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11207 
// Generated: dst = ~(src1 ^ (src2 << src3)), eonw with LSL-shifted operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11225 
// Generated: dst = ~(src1 ^ (src2 << src3)) (64-bit), eon with LSL operand.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11243 
// Generated: dst = src1 | ~(src2 >>> src3), ornw with LSR-shifted operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11261 
// Generated: dst = src1 | ~(src2 >>> src3) (64-bit), orn with LSR operand.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11279 
// Generated: dst = src1 | ~(src2 >> src3), ornw with ASR-shifted operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11297 
// Generated: dst = src1 | ~(src2 >> src3) (64-bit), orn with ASR operand.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11315 
// Generated: dst = src1 | ~(src2 << src3), ornw with LSL-shifted operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11333 
// Generated: dst = src1 | ~(src2 << src3) (64-bit), orn with LSL operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11351 
// Generated: dst = src1 & (src2 >>> src3); shift fused into andw's operand.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11370 
// Generated: dst = src1 & (src2 >>> src3) (64-bit), andr with LSR operand.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11389 
// Generated: dst = src1 & (src2 >> src3); shift fused as ASR operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11408 
// Generated: dst = src1 & (src2 >> src3) (64-bit), andr with ASR operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11427 
// Generated: dst = src1 & (src2 << src3); shift fused as LSL operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11446 
// Generated: dst = src1 & (src2 << src3) (64-bit), andr with LSL operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11465 
// Generated: dst = src1 ^ (src2 >>> src3); shift fused into eorw's operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11484 
// Generated: dst = src1 ^ (src2 >>> src3) (64-bit), eor with LSR operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11503 
// Generated: dst = src1 ^ (src2 >> src3); shift fused as ASR operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11522 
// Generated: dst = src1 ^ (src2 >> src3) (64-bit), eor with ASR operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11541 
// Generated: dst = src1 ^ (src2 << src3); shift fused as LSL operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11560 
// Generated: dst = src1 ^ (src2 << src3) (64-bit), eor with LSL operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11579 
11580 instruct OrI_reg_URShift_reg(iRegINoSp dst,
11581                          iRegIorL2I src1, iRegIorL2I src2,
11582                          immI src3, rFlagsReg cr) %{
11583   match(Set dst (OrI src1 (URShiftI src2 src3)));
11584 
11585   ins_cost(1.9 * INSN_COST);
11586   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
11587 
11588   ins_encode %{
11589     __ orrw(as_Register($dst$$reg),
11590               as_Register($src1$$reg),
11591               as_Register($src2$$reg),
11592               Assembler::LSR,
11593               $src3$$constant & 0x1f);
11594   %}
11595 
11596   ins_pipe(ialu_reg_reg_shift);
11597 %}
11598 
11599 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
11600                          iRegL src1, iRegL src2,
11601                          immI src3, rFlagsReg cr) %{
11602   match(Set dst (OrL src1 (URShiftL src2 src3)));
11603 
11604   ins_cost(1.9 * INSN_COST);
11605   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
11606 
11607   ins_encode %{
11608     __ orr(as_Register($dst$$reg),
11609               as_Register($src1$$reg),
11610               as_Register($src2$$reg),
11611               Assembler::LSR,
11612               $src3$$constant & 0x3f);
11613   %}
11614 
11615   ins_pipe(ialu_reg_reg_shift);
11616 %}
11617 
11618 instruct OrI_reg_RShift_reg(iRegINoSp dst,
11619                          iRegIorL2I src1, iRegIorL2I src2,
11620                          immI src3, rFlagsReg cr) %{
11621   match(Set dst (OrI src1 (RShiftI src2 src3)));
11622 
11623   ins_cost(1.9 * INSN_COST);
11624   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
11625 
11626   ins_encode %{
11627     __ orrw(as_Register($dst$$reg),
11628               as_Register($src1$$reg),
11629               as_Register($src2$$reg),
11630               Assembler::ASR,
11631               $src3$$constant & 0x1f);
11632   %}
11633 
11634   ins_pipe(ialu_reg_reg_shift);
11635 %}
11636 
11637 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
11638                          iRegL src1, iRegL src2,
11639                          immI src3, rFlagsReg cr) %{
11640   match(Set dst (OrL src1 (RShiftL src2 src3)));
11641 
11642   ins_cost(1.9 * INSN_COST);
11643   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
11644 
11645   ins_encode %{
11646     __ orr(as_Register($dst$$reg),
11647               as_Register($src1$$reg),
11648               as_Register($src2$$reg),
11649               Assembler::ASR,
11650               $src3$$constant & 0x3f);
11651   %}
11652 
11653   ins_pipe(ialu_reg_reg_shift);
11654 %}
11655 
11656 instruct OrI_reg_LShift_reg(iRegINoSp dst,
11657                          iRegIorL2I src1, iRegIorL2I src2,
11658                          immI src3, rFlagsReg cr) %{
11659   match(Set dst (OrI src1 (LShiftI src2 src3)));
11660 
11661   ins_cost(1.9 * INSN_COST);
11662   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
11663 
11664   ins_encode %{
11665     __ orrw(as_Register($dst$$reg),
11666               as_Register($src1$$reg),
11667               as_Register($src2$$reg),
11668               Assembler::LSL,
11669               $src3$$constant & 0x1f);
11670   %}
11671 
11672   ins_pipe(ialu_reg_reg_shift);
11673 %}
11674 
11675 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
11676                          iRegL src1, iRegL src2,
11677                          immI src3, rFlagsReg cr) %{
11678   match(Set dst (OrL src1 (LShiftL src2 src3)));
11679 
11680   ins_cost(1.9 * INSN_COST);
11681   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
11682 
11683   ins_encode %{
11684     __ orr(as_Register($dst$$reg),
11685               as_Register($src1$$reg),
11686               as_Register($src2$$reg),
11687               Assembler::LSL,
11688               $src3$$constant & 0x3f);
11689   %}
11690 
11691   ins_pipe(ialu_reg_reg_shift);
11692 %}
11693 
// Add with the second operand shifted by an immediate, using the
// shifted-register operand form of add/addw.  Shift counts are masked
// to the encodable range (& 0x1f word, & 0x3f doubleword).

// Int ADD with second operand logical-shifted right by an immediate.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long ADD with second operand logical-shifted right by an immediate.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int ADD with second operand arithmetic-shifted right by an immediate.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long ADD with second operand arithmetic-shifted right by an immediate.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int ADD with second operand left-shifted by an immediate.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long ADD with second operand left-shifted by an immediate.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11807 
// Subtract with the subtrahend shifted by an immediate, using the
// shifted-register operand form of sub/subw.  Shift counts are masked
// to the encodable range (& 0x1f word, & 0x3f doubleword).

// Int SUB with subtrahend logical-shifted right by an immediate.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long SUB with subtrahend logical-shifted right by an immediate.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int SUB with subtrahend arithmetic-shifted right by an immediate.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long SUB with subtrahend arithmetic-shifted right by an immediate.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int SUB with subtrahend left-shifted by an immediate.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long SUB with subtrahend left-shifted by an immediate.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11921 
11922 
11923 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// A left shift followed by a (signed) right shift of a 64-bit value is
// a signed bitfield move: r = (rshift - lshift) mod 64, s = 63 - lshift.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: r = (rshift - lshift) mod 32, s = 31 - lshift.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned version: left shift then logical right shift maps to ubfm.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12015 // Bitfield extract with shift & mask
12016 
// Unsigned bitfield extract (32-bit): (src >>> rshift) & mask, where
// mask is a contiguous low-bit mask (guaranteed by immI_bitmask).
// The field width is log2(mask + 1).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  // Fixed: format previously omitted the $rshift operand the encoding uses.
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// Unsigned bitfield extract (64-bit): (src >>> rshift) & mask, where
// mask is a contiguous low-bit mask (guaranteed by immL_bitmask).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  // Fixed: format previously omitted the $rshift operand the encoding uses.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12047 
12048 // We can use ubfx when extending an And with a mask when we know mask
12049 // is positive.  We know that because immI_bitmask guarantees it.
// ubfx also covers the i2l widening of a masked extract, since
// immI_bitmask guarantees the masked result is non-negative and the
// 64-bit ubfx zero-fills the high bits.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  // Fixed: format previously omitted the $rshift operand the encoding uses.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12065 
12066 // Rotations
12067 
// 64-bit funnel shift via EXTR: (src1 << lshift) | (src2 >>> rshift).
// The predicate requires lshift + rshift == 0 (mod 64), which is what
// EXTR implements; only rshift is needed in the encoding.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12082 
// 32-bit funnel shift via EXTRW: (src1 << lshift) | (src2 >>> rshift),
// with lshift + rshift == 0 (mod 32) enforced by the predicate.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Fixed: format said "extr" but the encoding emits the 32-bit extrw.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12097 
// Same funnel-shift idiom as extrOrL but matched on AddL: when the two
// shifted fields cannot overlap (shift counts sum to 0 mod 64), add and
// or are equivalent, so EXTR applies here too.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12112 
// 32-bit AddI form of the funnel-shift idiom, emitted as EXTRW
// (see extrAddL for why add and or are interchangeable here).
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Fixed: format said "extr" but the encoding emits the 32-bit extrw.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12127 
12128 
// rol expander

// Rotate-left of a long by a variable amount.  AArch64 has no rol
// instruction, so this negates the count and uses rorv
// (rol(x, n) == ror(x, -n)); rscratch1 holds the negated count.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12144 
// rol expander

// 32-bit variant of rolL_rReg: negate the count and use rorvw.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12160 
// Variable rotate-left of a long written as
// (src << shift) | (src >>> (64 - shift)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with (0 - shift) as the right-shift count; equivalent to
// the 64 - shift form because shift counts are taken mod 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12178 
// Variable rotate-left of an int written as
// (src << shift) | (src >>> (32 - shift)).
// Fixed: these rules match the 32-bit OrI/LShiftI idiom but previously
// declared long register classes (iRegLNoSp/iRegL) and expanded through
// the 64-bit rolL_rReg.  They must use int registers and the 32-bit
// rolI_rReg expander, mirroring rorI_rReg_Var_C_32 below.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with (0 - shift) as the right-shift count; equivalent to
// the 32 - shift form because shift counts are taken mod 32.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12196 
// ror expander

// Rotate-right of a long by a variable amount; maps directly to rorv.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit variant of rorL_rReg; maps directly to rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12226 
// Variable rotate-right of a long written as
// (src >>> shift) | (src << (64 - shift)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with (0 - shift) as the left-shift count; equivalent to
// the 64 - shift form because shift counts are taken mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Variable rotate-right of an int written as
// (src >>> shift) | (src << (32 - shift)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with (0 - shift) as the left-shift count; equivalent to
// the 32 - shift form because shift counts are taken mod 32.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12262 
// Add/subtract (extended)
// These fold an i2l conversion of the second operand into the
// extended-register form of add/sub (sxtw), saving the separate
// sign-extend instruction.

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12290 
12291 
// The rules below recognize a sign/zero extension expressed as a
// left-shift/right-shift pair (e.g. <<16 then >>16 for sxth) and fold
// it into the extended-register form of add.

// add with the second operand sign-extended from 16 bits (<<16, >>16).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add with the second operand sign-extended from 8 bits (<<24, >>24).
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add with the second operand zero-extended from 8 bits (<<24, >>>24).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with the second operand sign-extended from 16 bits (<<48, >>48).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with the second operand sign-extended from 32 bits (<<32, >>32).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with the second operand sign-extended from 8 bits (<<56, >>56).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with the second operand zero-extended from 8 bits (<<56, >>>56).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12382 
12383 
// The rules below recognize a zero extension expressed as an AND with a
// low-bit mask (0xff -> uxtb, 0xffff -> uxth, 0xffffffff -> uxtw) and
// fold it into the extended-register form of add/sub.

// addw with the second operand zero-extended from 8 bits (AND 0xff).
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw with the second operand zero-extended from 16 bits (AND 0xffff).
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add with the second operand zero-extended from 8 bits (AND 0xff).
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add with the second operand zero-extended from 16 bits (AND 0xffff).
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add with the second operand zero-extended from 32 bits (AND 0xffffffff).
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// subw with the subtrahend zero-extended from 8 bits (AND 0xff).
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// subw with the subtrahend zero-extended from 16 bits (AND 0xffff).
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// sub with the subtrahend zero-extended from 8 bits (AND 0xff).
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// sub with the subtrahend zero-extended from 16 bits (AND 0xffff).
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// sub with the subtrahend zero-extended from 32 bits (AND 0xffffffff).
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12513 
12514 // END This section of the file is automatically generated. Do not edit --------------
12515 
12516 // ============================================================================
12517 // Floating Point Arithmetic Instructions
12518 
// Binary FP arithmetic, register-register forms.  Each rule maps one ideal
// node directly onto a single scalar FP instruction; _s rules are
// single-precision, _d rules double-precision.

// float add -> fadds.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double add -> faddd.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float subtract -> fsubs.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double subtract -> fsubd.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float multiply -> fmuls (slightly higher cost than add/sub).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double multiply -> fmuld.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12608 
// We cannot use these fused mul-with-add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12614 
12615 
12616 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12617 //   match(Set dst (AddF (MulF src1 src2) src3));
12618 
12619 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12620 
12621 //   ins_encode %{
12622 //     __ fmadds(as_FloatRegister($dst$$reg),
12623 //              as_FloatRegister($src1$$reg),
12624 //              as_FloatRegister($src2$$reg),
12625 //              as_FloatRegister($src3$$reg));
12626 //   %}
12627 
12628 //   ins_pipe(pipe_class_default);
12629 // %}
12630 
12631 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12632 //   match(Set dst (AddD (MulD src1 src2) src3));
12633 
12634 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12635 
12636 //   ins_encode %{
12637 //     __ fmaddd(as_FloatRegister($dst$$reg),
12638 //              as_FloatRegister($src1$$reg),
12639 //              as_FloatRegister($src2$$reg),
12640 //              as_FloatRegister($src3$$reg));
12641 //   %}
12642 
12643 //   ins_pipe(pipe_class_default);
12644 // %}
12645 
12646 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12647 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12648 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12649 
12650 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12651 
12652 //   ins_encode %{
12653 //     __ fmsubs(as_FloatRegister($dst$$reg),
12654 //               as_FloatRegister($src1$$reg),
12655 //               as_FloatRegister($src2$$reg),
12656 //              as_FloatRegister($src3$$reg));
12657 //   %}
12658 
12659 //   ins_pipe(pipe_class_default);
12660 // %}
12661 
12662 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12663 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12664 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12665 
12666 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12667 
12668 //   ins_encode %{
12669 //     __ fmsubd(as_FloatRegister($dst$$reg),
12670 //               as_FloatRegister($src1$$reg),
12671 //               as_FloatRegister($src2$$reg),
12672 //               as_FloatRegister($src3$$reg));
12673 //   %}
12674 
12675 //   ins_pipe(pipe_class_default);
12676 // %}
12677 
12678 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12679 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12680 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12681 
12682 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12683 
12684 //   ins_encode %{
12685 //     __ fnmadds(as_FloatRegister($dst$$reg),
12686 //                as_FloatRegister($src1$$reg),
12687 //                as_FloatRegister($src2$$reg),
12688 //                as_FloatRegister($src3$$reg));
12689 //   %}
12690 
12691 //   ins_pipe(pipe_class_default);
12692 // %}
12693 
12694 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12695 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12696 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12697 
12698 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12699 
12700 //   ins_encode %{
12701 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12702 //                as_FloatRegister($src1$$reg),
12703 //                as_FloatRegister($src2$$reg),
12704 //                as_FloatRegister($src3$$reg));
12705 //   %}
12706 
12707 //   ins_pipe(pipe_class_default);
12708 // %}
12709 
12710 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12711 //   match(Set dst (SubF (MulF src1 src2) src3));
12712 
12713 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12714 
12715 //   ins_encode %{
12716 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12717 //                as_FloatRegister($src1$$reg),
12718 //                as_FloatRegister($src2$$reg),
12719 //                as_FloatRegister($src3$$reg));
12720 //   %}
12721 
12722 //   ins_pipe(pipe_class_default);
12723 // %}
12724 
12725 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12726 //   match(Set dst (SubD (MulD src1 src2) src3));
12727 
12728 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12729 
12730 //   ins_encode %{
12731 //   // n.b. insn name should be fnmsubd
12732 //     __ fnmsub(as_FloatRegister($dst$$reg),
12733 //                as_FloatRegister($src1$$reg),
12734 //                as_FloatRegister($src2$$reg),
12735 //                as_FloatRegister($src3$$reg));
12736 //   %}
12737 
12738 //   ins_pipe(pipe_class_default);
12739 // %}
12740 
12741 
// FP division.  Costs are much higher than add/mul, reflecting the long
// latency of the hardware divider (double worse than single).

// float divide -> fdivs.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// double divide -> fdivd.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12771 
// float negate -> fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Print the same name as the emitted assembler call (fnegs), matching
  // the naming convention of the sibling rules (fnegd, fabss, fabsd).
  // Previously printed as "fneg".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12785 
// double negate -> fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12799 
// float absolute value -> fabss.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// double absolute value -> fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12825 
// double square root -> fsqrtd.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Schedule on the double-precision divide/sqrt pipe (as divD does).
  // The pipe classes of sqrtD/sqrtF were previously swapped
  // (fp_div_s here, fp_div_d below) — a scheduling-hint slip only;
  // the emitted code was unaffected.
  ins_pipe(fp_div_d);
%}
12838 
// float square root, matched from the ConvD2F(SqrtD(ConvF2D src)) shape
// the ideal graph uses for a single-precision Math.sqrt; emitted as a
// single fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Single-precision op: use the single-precision divide/sqrt pipe
  // (was fp_div_d; the sqrtD/sqrtF pipe classes were swapped).
  ins_pipe(fp_div_s);
%}
12851 
12852 // ============================================================================
12853 // Logical Instructions
12854 
12855 // Integer Logical Instructions
12856 
12857 // And Instructions
12858 
12859 
// int bitwise AND, register-register -> andw.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12874 
// int bitwise AND with a logical-immediate (immILog: encodable bitmask
// immediate) -> andw.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // The encoding emits the plain andw, not the flag-setting variant, so
  // print "andw" (previously printed as "andsw").
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12889 
// Or Instructions

// int bitwise OR, register-register -> orrw.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int bitwise OR with a logical-immediate -> orrw.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// int bitwise XOR, register-register -> eorw.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int bitwise XOR with a logical-immediate -> eorw.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12953 
12954 // Long Logical Instructions
12955 // TODO
12956 
// 64-bit logical ops.  Format comments previously said "# int"; these are
// long ops, so they now say "# long".  xorL_reg_imm's format/ins_cost
// ordering is also normalized to match its siblings.

// long bitwise AND, register-register -> andr.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise AND with a logical-immediate (immLLog) -> andr.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// long bitwise OR, register-register -> orr.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise OR with a logical-immediate -> orr.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// long bitwise XOR, register-register -> eor.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise XOR with a logical-immediate -> eor.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13050 
// Sign-extend int to long: sbfm Rd, Rn, #0, #31 is the canonical
// encoding of sxtw.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
13062 
// Zero-extend int to long: (AndL (ConvI2L src) 0xFFFFFFFF) collapses to a
// single ubfm (uxtw).  This pattern occurs in bigmath arithmetic.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13076 
// Truncate long to int: a 32-bit register move (movw) discards the
// upper 32 bits.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13089 
// int -> boolean (0/1): compare against zero, then cset on NE.
// Clobbers the condition flags, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13107 
// pointer -> boolean (0/1): same cmp/cset sequence as convI2B but using
// the full 64-bit compare.  Clobbers the condition flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13125 
// FP <-> FP and FP <-> integer conversions, one instruction each.
// fcvtzs* = convert-to-signed-int with round-toward-zero;
// scvtf*  = signed-int-to-FP.

// double -> float.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int (truncating).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long (truncating).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int (truncating).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long (truncating).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13255 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These reinterpret the raw bits (Float.floatToRawIntBits and friends);
// the stack variants go through a spill slot addressed off sp.

// Load a spilled float's bits into a GP register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a spilled int's bits into an FP register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load a spilled double's bits into a GP register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a spilled long's bits into an FP register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store a float's bits to an int spill slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int's bits to a float spill slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13365 
// Store a double's bits to a long spill slot.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // A store prints source register first, destination slot second, matching
  // the sibling rules (strs/strw/str above and below).  Previously printed
  // as "strd $dst, $src".
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13383 
// Store a long's bits to a double spill slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Direct register-to-register bit moves via fmov: no memory round trip.

// float bits -> GP register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int bits -> FP register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double bits -> GP register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long bits -> FP register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13473 
13474 // ============================================================================
13475 // clearing of an array
13476 
// Clear (zero) an array: count in r11, base address in r10; both are
// consumed (USE_KILL) by the zero_words stub call.
// NOTE(review): the rFlagsReg cr operand is declared but has no KILL
// effect — confirm whether zero_words can clobber the flags.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
13491 
// Clear an array whose length is a compile-time constant; the constant
// word count is passed directly to zero_words.
// NOTE(review): tmp (r11) is declared TEMP and cr is declared but unused
// in the effects — presumably the zero_words constant-count path may use
// r11 internally; confirm against MacroAssembler::zero_words.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 tmp, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base, TEMP tmp);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13506 
13507 // ============================================================================
13508 // Overflow Math Instructions
13509 
// Overflow checks for add/sub: these produce only the condition flags
// (Set cr ...).  cmn performs a flag-setting add, cmp a flag-setting
// subtract; the consumer of cr tests the resulting flags.

// int add overflow check, register-register.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// int add overflow check, register-immediate (immIAddSub: presumably an
// add/sub-encodable immediate — confirm against the operand definition).
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long add overflow check, register-register.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// long add overflow check, register-immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// int subtract overflow check, register-register.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// int subtract overflow check, register-immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long subtract overflow check, register-register.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// long subtract overflow check, register-immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13613 
// Negation overflow check (int): matches OverflowSub with a zero first
// operand, i.e. -op1, and compares zr against op1 to set the flags.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Negation overflow check (long).
// NOTE(review): the zero operand is declared immI0 while the match is
// OverflowSubL — presumably intentional (mirrors the int variant); confirm
// against the immL0 operand definition elsewhere in this file.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13639 
// Int multiply overflow check producing a flags result.  The 64-bit
// smull result overflows 32 bits iff it differs from its own low-word
// sign extension; that NE/EQ outcome is then converted into the V flag
// (0x80000000 - 1 sets V) so downstream users can test VS/VC.
// The five-instruction sequence is order-critical.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
13660 
// Fused form of overflowMulI_reg for an If consumer testing only
// overflow/no_overflow: skips the flag-materialization tail and branches
// directly on the NE/EQ outcome of the sign-extension compare (NE maps
// to VS, EQ to VC).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13682 
// Long multiply overflow check producing a flags result.  The 128-bit
// product (mul low / smulh high) overflows 64 bits iff the high half is
// not the pure sign extension of the low half; the NE/EQ outcome is then
// converted into the V flag exactly as in overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
13705 
// Fused form of overflowMulL_reg for an If consumer testing only
// overflow/no_overflow: branches directly on the NE/EQ outcome of the
// high-half/sign-extension compare (NE maps to VS, EQ to VC).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13729 
13730 // ============================================================================
13731 // Compare Instructions
13732 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an add/sub-encodable immediate (one insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costs 2x because the
// constant may need to be materialized first (see aarch64_enc_cmpw_imm).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13788 
13789 // Unsigned compare Instructions; really, same as signed compare
13790 // except it should only be used to feed an If or a CMovI which takes a
13791 // cmpOpU.
13792 
// Unsigned int compare, register-register.  The emitted cmpw is the same
// as the signed variant; the difference is the rFlagsRegU result, which
// must only be consumed through a cmpOpU.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against the constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate (one insn).
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate; 2x cost since the
// constant may need materializing first.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13848 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
// NOTE(review): the zero operand is declared immI0 while the match is a
// CmpL — presumably intentional; verify against the immL0 operand
// definition used by the AddSub variant below.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate (one insn).
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate; 2x cost since the
// constant may need materializing first.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13904 
// Pointer compare, register-register.  Produces unsigned flags
// (rFlagsRegU) since pointer ordering is unsigned.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer (narrow oop) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13960 
13961 // FP comparisons
13962 //
13963 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13964 // using normal cmpOp. See declaration of rFlagsReg for details.
13965 
// Float compare, register-register: fcmps sets the normal flags register
// (see the FP comparisons note above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against +0.0, using the fcmps-with-zero form.
// NOTE(review): the `0.0D` literal suffix is a compiler extension, not
// standard C++ — confirm the toolchains in use still accept it.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13993 // FROM HERE
13994 
// Double compare, register-register: fcmpd sets the normal flags register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against +0.0, using the fcmpd-with-zero form.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
14022 
// Three-way float compare (CmpF3): produces -1/0/+1 in dst.
// csinv gives 0 on EQ, else -1; csneg then keeps -1 on LT (which also
// covers unordered) or negates it to +1 otherwise.
// NOTE(review): the `done` label is declared and bound but never branched
// to — appears to be vestigial.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare (CmpD3): same -1/0/+1 scheme as compF3_reg_reg.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14077 
// Three-way float compare against +0.0: same -1/0/+1 scheme as
// compF3_reg_reg, using the fcmps-with-zero form.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against +0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14130 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw yields 0/1 from the LT flag;
// negating (subw from zr) turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Special case against zero: an arithmetic right shift by 31 broadcasts
// the sign bit, giving -1 for negative src and 0 otherwise in one insn.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14167 
14168 // ============================================================================
14169 // Max and Min
14170 
// Signed int minimum via compare + conditional select (LT picks src1).
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum via compare + conditional select (GT picks src1).
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14221 
14222 // ============================================================================
14223 // Branch Instructions
14224 
14225 // Direct Branch.
// Direct Branch.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch: branch on a signed condition code result.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned: branch on an unsigned (cmpOpU)
// condition code result.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14281 
14282 // Make use of CBZ and CBNZ.  These instructions, as well as being
14283 // shorter than (cmp; branch), have the additional benefit of not
14284 // killing the flags.
14285 
// Int eq/ne-zero branch fused into a single CBZW/CBNZW.  Note cr is an
// operand but has no effect listed: the flags are neither set nor used.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long eq/ne-zero branch fused into a single CBZ/CBNZ.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null/non-null branch fused into a single CBZ/CBNZ.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop null/non-null branch fused into a single CBZW/CBNZW.
instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14361 
// Null check of a DecodeN result: a decoded narrow oop is null iff the
// narrow form is zero, so test the undecoded register with CBZW/CBNZW
// and skip the decode for the comparison.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14380 
// Unsigned int zero-compare branch fused into a single CBZW/CBNZW.
// Besides eq/ne, gt and le are accepted because against an unsigned zero
// they degenerate to non-zero/zero tests (HI => NE, LS => EQ).
instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ (== 0) and LS (<= 0 unsigned) branch when the register is zero;
    // NE and HI branch when it is non-zero.
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14401 
// Unsigned long zero-compare branch fused into a single CBZ/CBNZ; same
// eq/ne/gt/le acceptance as cmpUI_imm0_branch (LS => zero, HI => non-zero).
instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14422 
14423 // Test bit and Branch
14424 
14425 // Patterns for short (< 32KiB) variants
// Sign test of a long fused to a test-bit branch on bit 63: x < 0 iff
// the sign bit is set (LT => TBNZ), x >= 0 iff clear (GE => TBZ).
// Short (< 32KiB) variant — see ins_short_branch(1).
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int sign test via test-bit branch on bit 31 (short variant).
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// (x & single-bit-mask) ==/!= 0 branch fused to TBZ/TBNZ on that bit;
// the predicate restricts the mask to a power of two (short variant).
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int variant of the single-bit test branch (short variant).
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14499 
14500 // And far variants
// Far variants of the short test-bit branches above: identical patterns
// without ins_short_branch(1), passing far=true so out-of-range targets
// are reached via an inverted test around an unconditional branch.
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far int sign-test branch (bit 31).
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far long single-bit test branch.
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far int single-bit test branch.
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14570 
// Test bits
//
// Flag-setting AND-with-zero compares: (CmpX (AndX op1 op2) 0) is emitted
// as a single tst/tstw, so no materialized AND result is needed.

// Long form with an immediate mask; only masks encodable as a logical
// immediate qualify (checked by the predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Int form with an immediate mask (32-bit logical-immediate check).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Long form with a register mask.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Int form with a register mask.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14620 
14621 
// Conditional Far Branch
// Conditional Far Branch Unsigned
// TODO: fixme

// counted loop end branch near
// Signed-condition back branch at the end of a counted loop.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
// Same as above but with unsigned condition codes/flags.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14659 
// counted loop end branch far
// counted loop end branch far unsigned
// TODO: fixme

// ============================================================================
// inlined locking and unlocking

// Inline fast-path monitor enter; flags indicate success/failure and the
// slow path is taken on failure. tmp/tmp2 are scratch registers.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inline fast-path monitor exit, mirror of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14694 
14695 
14696 // ============================================================================
14697 // Safepoint Instructions
14698 
14699 // TODO
14700 // provide a near and far version of this code
14701 
14702 instruct safePoint(iRegP poll)
14703 %{
14704   match(SafePoint poll);
14705 
14706   format %{
14707     "ldrw zr, [$poll]\t# Safepoint: poll for GC"
14708   %}
14709   ins_encode %{
14710     __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
14711   %}
14712   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
14713 %}
14714 
14715 
14716 // ============================================================================
14717 // Procedure Call/Return Instructions
14718 
14719 // Call Java Static Instruction
14720 
14721 instruct CallStaticJavaDirect(method meth)
14722 %{
14723   match(CallStaticJava);
14724 
14725   effect(USE meth);
14726 
14727   ins_cost(CALL_COST);
14728 
14729   format %{ "call,static $meth \t// ==> " %}
14730 
14731   ins_encode( aarch64_enc_java_static_call(meth),
14732               aarch64_enc_call_epilog );
14733 
14734   ins_pipe(pipe_class_call);
14735 %}
14736 
14737 // TO HERE
14738 
14739 // Call Java Dynamic Instruction
14740 instruct CallDynamicJavaDirect(method meth)
14741 %{
14742   match(CallDynamicJava);
14743 
14744   effect(USE meth);
14745 
14746   ins_cost(CALL_COST);
14747 
14748   format %{ "CALL,dynamic $meth \t// ==> " %}
14749 
14750   ins_encode( aarch64_enc_java_dynamic_call(meth),
14751                aarch64_enc_call_epilog );
14752 
14753   ins_pipe(pipe_class_call);
14754 %}
14755 
14756 // Call Runtime Instruction
14757 
14758 instruct CallRuntimeDirect(method meth)
14759 %{
14760   match(CallRuntime);
14761 
14762   effect(USE meth);
14763 
14764   ins_cost(CALL_COST);
14765 
14766   format %{ "CALL, runtime $meth" %}
14767 
14768   ins_encode( aarch64_enc_java_to_runtime(meth) );
14769 
14770   ins_pipe(pipe_class_call);
14771 %}
14772 
14773 // Call Runtime Instruction
14774 
14775 instruct CallLeafDirect(method meth)
14776 %{
14777   match(CallLeaf);
14778 
14779   effect(USE meth);
14780 
14781   ins_cost(CALL_COST);
14782 
14783   format %{ "CALL, runtime leaf $meth" %}
14784 
14785   ins_encode( aarch64_enc_java_to_runtime(meth) );
14786 
14787   ins_pipe(pipe_class_call);
14788 %}
14789 
14790 // Call Runtime Instruction
14791 
14792 instruct CallLeafNoFPDirect(method meth)
14793 %{
14794   match(CallLeafNoFP);
14795 
14796   effect(USE meth);
14797 
14798   ins_cost(CALL_COST);
14799 
14800   format %{ "CALL, runtime leaf nofp $meth" %}
14801 
14802   ins_encode( aarch64_enc_java_to_runtime(meth) );
14803 
14804   ins_pipe(pipe_class_call);
14805 %}
14806 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Indirect jump carrying an exception oop in r0 (see register class),
// used when forwarding an exception; pops the return address.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14836 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size node: it only names the register the runtime put the oop in.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14867 
14868 
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now.
// Halt node: emits a breakpoint so reaching this code traps immediately.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14897 
14898 // ============================================================================
14899 // Partial Subtype Check
14900 //
14901 // superklass array for an instance of the superklass.  Set a hidden
14902 // internal cache on a hit (cache is checked with exposed code in
14903 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14904 // encoding ALSO sets flags.
14905 
14906 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
14907 %{
14908   match(Set result (PartialSubtypeCheck sub super));
14909   effect(KILL cr, KILL temp);
14910 
14911   ins_cost(1100);  // slightly larger than the next version
14912   format %{ "partialSubtypeCheck $result, $sub, $super" %}
14913 
14914   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14915 
14916   opcode(0x1); // Force zero of result reg on hit
14917 
14918   ins_pipe(pipe_class_memory);
14919 %}
14920 
14921 instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
14922 %{
14923   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
14924   effect(KILL temp, KILL result);
14925 
14926   ins_cost(1100);  // slightly larger than the next version
14927   format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}
14928 
14929   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14930 
14931   opcode(0x0); // Don't zero result reg on hit
14932 
14933   ins_pipe(pipe_class_memory);
14934 %}
14935 
// String comparison intrinsics, one per encoding pair (U = UTF-16,
// L = Latin-1); selected by the StrComp node's encoding.

instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed-encoding compares need vector temps for the inflate step.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
15004 
// String.indexOf intrinsic (UTF-16/UTF-16); needle length in a register.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    // -1 = needle length not a compile-time constant.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Variant with a small constant needle length (<= 4), enabling a
// specialized code path in the macro assembler.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // zr in the cnt2 slot: the constant icnt2 carries the needle length.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
15044 
// String/array equality intrinsics; the third argument to arrays_equals
// is the element size in bytes (1 = Latin-1/byte, 2 = UTF-16/char).

instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     1, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    // Halve the byte count to a char count (cnt is USE_KILL, so
    // destroying it in place is fine).
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     2, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}

// Array variants: length is read from the array headers, tmp is scratch.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     1, /*is_string*/false);
    %}
  ins_pipe(pipe_class_memory);
%}

instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     2, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
15111 
15112 
// fast char[] to byte[] compression
// Compact-Strings compression; result reports success/char count.
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // NOTE(review): the format string says "KILL R1, R2, R3, R4" but the
  // effect list kills R1-R3 plus vector temps V0-V3 — confirm R4.
  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD tmp1, vRegD tmp2, vRegD tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15165 
15166 // ============================================================================
15167 // This name is KNOWN by the ADLC and cannot be changed.
15168 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15169 // for this guy.
15170 instruct tlsLoadP(thread_RegP dst)
15171 %{
15172   match(Set dst (ThreadLocal));
15173 
15174   ins_cost(0);
15175 
15176   format %{ " -- \t// $dst=Thread::current(), empty" %}
15177 
15178   size(0);
15179 
15180   ins_encode( /*empty*/ );
15181 
15182   ins_pipe(pipe_class_empty);
15183 %}
15184 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads/stores, selected by the node's memory size; single
// scalar-width FP load/store instructions cover each case.

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15252 
// Replicate (broadcast) a scalar into every lane of a vector register.
// xxx_imm variants use movi with the constant truncated to lane width.

// 64-bit vector holds the smaller lengths too (4B packed in a vecD).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // Mask the constant down to one byte lane.
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Mask the constant down to one half-word lane.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15352 
// Int/long replicates; the _zero variant synthesizes a zero vector
// with a self-EOR instead of materializing the constant.

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// NOTE(review): matches ReplicateI (not ReplicateL) of zero at length 2;
// confirm this is the intended ideal-graph shape for a zero long vector.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    // EOR the register with itself: cheapest way to zero all 128 bits.
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15426 
// Float/double replicates broadcast from an FP source register.

instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15465 
15466 // ====================REDUCTION ARITHMETIC====================================
15467 
// Add-reduce a 2-lane int vector into a scalar: extract each S lane with
// umov, then accumulate both lanes and the scalar input src1 with addw.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15486 
// Add-reduce a 4-lane int vector into a scalar: addv folds all four S
// lanes into tmp, umov extracts lane 0, addw adds the scalar input src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15504 
// Multiply-reduce a 2-lane int vector into a scalar: extract each S lane
// with umov and fold it into dst with scalar mul, seeded by src1.
// Fix: dropped the stray trailing "\n\t" at the end of the format string,
// which printed a dangling blank continuation line in disassembly.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15523 
// Multiply-reduce a 4-lane int vector: ins copies the high D half of src2
// into tmp, a 2S mulv multiplies the halves pairwise, then the two
// remaining S lanes are extracted with umov and folded with scalar mul,
// seeded by src1.
// Fix: dropped the stray trailing "\n\t" at the end of the format string,
// which printed a dangling blank continuation line in disassembly.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15548 
// Add-reduce a 2-lane float vector: strict-order scalar fadds of src1
// with lane 0, then with lane 1 (moved to tmp via ins), preserving
// Java's required left-to-right FP evaluation order.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15568 
// Add-reduce a 4-lane float vector: strict-order scalar fadds over lanes
// 0..3, moving each successive lane into tmp with ins, preserving Java's
// required left-to-right FP evaluation order.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15600 
// Multiply-reduce a 2-lane float vector: strict-order scalar fmuls of
// src1 with lane 0, then with lane 1 (moved to tmp via ins).
// Fix: the format comment said "add reduction4f"; this is a 2-lane
// multiply reduction, so it now reads "mul reduction2f".
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15620 
// Multiply-reduce a 4-lane float vector: strict-order scalar fmuls over
// lanes 0..3, moving each successive lane into tmp with ins.
// Fix: the format comment said "add reduction4f"; this is a multiply
// reduction, so it now reads "mul reduction4f".
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15652 
// Add-reduce a 2-lane double vector: strict-order scalar faddd of src1
// with lane 0, then with lane 1 (moved to tmp via ins).
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15672 
// Multiply-reduce a 2-lane double vector: strict-order scalar fmuld of
// src1 with lane 0, then with lane 1 (moved to tmp via ins).
// Fix: the format comment said "add reduction2d"; this is a multiply
// reduction, so it now reads "mul reduction2d".
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15692 
15693 // ====================VECTOR ARITHMETIC=======================================
15694 
15695 // --------------------------------- ADD --------------------------------------
15696 
// Element-wise byte add on a 64-bit vector; the predicate also accepts
// 4-byte vectors, which use the same 8B encoding.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15711 
// Element-wise byte add on a 128-bit vector (16 lanes).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15725 
// Element-wise short add on a 64-bit vector (4H); the predicate also
// accepts 2-lane short vectors, which use the same encoding.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15740 
// Element-wise short add on a 128-bit vector (8H).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15754 
// Element-wise int add on a 64-bit vector (2S).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15768 
// Element-wise int add on a 128-bit vector (4S).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15782 
// Element-wise long add on a 128-bit vector (2D).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15796 
// Element-wise float add on a 64-bit vector (2S).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
15810 
// Element-wise float add on a 128-bit vector (4S).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15824 
// Element-wise double add on a 128-bit vector (2D).
// Fix: added the length()==2 predicate for consistency with the sibling
// 2D rules (vsub2D, vmul2D, vdiv2D), which all guard on it.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15837 
15838 // --------------------------------- SUB --------------------------------------
15839 
// Element-wise byte subtract on a 64-bit vector; the predicate also
// accepts 4-byte vectors, which use the same 8B encoding.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15854 
// Element-wise byte subtract on a 128-bit vector (16B).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15868 
// Element-wise short subtract on a 64-bit vector (4H); also covers
// 2-lane short vectors via the predicate.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15883 
// Element-wise short subtract on a 128-bit vector (8H).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15897 
// Element-wise int subtract on a 64-bit vector (2S).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15911 
// Element-wise int subtract on a 128-bit vector (4S).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15925 
// Element-wise long subtract on a 128-bit vector (2D).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15939 
// Element-wise float subtract on a 64-bit vector (2S).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
15953 
// Element-wise float subtract on a 128-bit vector (4S).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15967 
// Element-wise double subtract on a 128-bit vector (2D).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15981 
15982 // --------------------------------- MUL --------------------------------------
15983 
// Element-wise short multiply on a 64-bit vector (4H); also covers
// 2-lane short vectors via the predicate.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
15998 
// Element-wise short multiply on a 128-bit vector (8H).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16012 
// Element-wise int multiply on a 64-bit vector (2S).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
16026 
// Element-wise int multiply on a 128-bit vector (4S).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
16040 
// Element-wise float multiply on a 64-bit vector (2S).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16054 
// Element-wise float multiply on a 128-bit vector (4S).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16068 
// Element-wise double multiply on a 128-bit vector (2D).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16082 
16083 // --------------------------------- MLA --------------------------------------
16084 
// Fused multiply-accumulate: dst += src1 * src2, short lanes on a
// 64-bit vector (4H); also covers 2-lane short vectors.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16099 
// Multiply-accumulate: dst += src1 * src2, short lanes on a 128-bit
// vector (8H).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16113 
// Multiply-accumulate: dst += src1 * src2, int lanes on a 64-bit
// vector (2S).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16127 
// Multiply-accumulate: dst += src1 * src2, int lanes on a 128-bit
// vector (4S).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16141 
16142 // --------------------------------- MLS --------------------------------------
16143 
// Multiply-subtract: dst -= src1 * src2, short lanes on a 64-bit
// vector (4H); also covers 2-lane short vectors.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16158 
// Multiply-subtract: dst -= src1 * src2, short lanes on a 128-bit
// vector (8H).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16172 
// Multiply-subtract: dst -= src1 * src2, int lanes on a 64-bit
// vector (2S).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
16186 
// Multiply-subtract: dst -= src1 * src2, int lanes on a 128-bit
// vector (4S).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16200 
16201 // --------------------------------- DIV --------------------------------------
16202 
// Element-wise float divide on a 64-bit vector (2S).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
16216 
// Element-wise float divide on a 128-bit vector (4S).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16230 
// Element-wise double divide on a 128-bit vector (2D).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16244 
16245 // --------------------------------- SQRT -------------------------------------
16246 
// Element-wise double square root on a 128-bit vector (2D).
// NOTE(review): unlike neighbouring rules this one declares no ins_cost,
// so it falls back to the adlc default -- confirm whether an explicit
// cost was intended.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16258 
16259 // --------------------------------- ABS --------------------------------------
16260 
// Element-wise float absolute value on a 64-bit vector (2S).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
16273 
// Element-wise float absolute value on a 128-bit vector (4S).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16286 
// Element-wise double absolute value on a 128-bit vector (2D).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16299 
16300 // --------------------------------- NEG --------------------------------------
16301 
// Element-wise float negation on a 64-bit vector (2S).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
16314 
// Element-wise float negation on a 128-bit vector (4S).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16327 
// Element-wise double negation on a 128-bit vector (2D).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16340 
16341 // --------------------------------- AND --------------------------------------
16342 
16343 instruct vand8B(vecD dst, vecD src1, vecD src2)
16344 %{
16345   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16346             n->as_Vector()->length_in_bytes() == 8);
16347   match(Set dst (AndV src1 src2));
16348   ins_cost(INSN_COST);
16349   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16350   ins_encode %{
16351     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16352             as_FloatRegister($src1$$reg),
16353             as_FloatRegister($src2$$reg));
16354   %}
16355   ins_pipe(vlogical64);
16356 %}
16357 
// Bitwise AND of two 16-byte (128-bit) vectors.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16371 
16372 // --------------------------------- OR ---------------------------------------
16373 
// Bitwise OR of vectors of 8 bytes or fewer.  Vectors of only 4 bytes are
// also accepted; they occupy the low half of the 64-bit vecD register.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // The format previously printed "and" for this ORR instruction, producing
  // misleading PrintOptoAssembly output; print "orr" to match vor16B and the
  // instruction actually emitted.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16388 
// Bitwise OR of two 16-byte (128-bit) vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16402 
16403 // --------------------------------- XOR --------------------------------------
16404 
16405 instruct vxor8B(vecD dst, vecD src1, vecD src2)
16406 %{
16407   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16408             n->as_Vector()->length_in_bytes() == 8);
16409   match(Set dst (XorV src1 src2));
16410   ins_cost(INSN_COST);
16411   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
16412   ins_encode %{
16413     __ eor(as_FloatRegister($dst$$reg), __ T8B,
16414             as_FloatRegister($src1$$reg),
16415             as_FloatRegister($src2$$reg));
16416   %}
16417   ins_pipe(vlogical64);
16418 %}
16419 
16420 instruct vxor16B(vecX dst, vecX src1, vecX src2)
16421 %{
16422   predicate(n->as_Vector()->length_in_bytes() == 16);
16423   match(Set dst (XorV src1 src2));
16424   ins_cost(INSN_COST);
16425   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
16426   ins_encode %{
16427     __ eor(as_FloatRegister($dst$$reg), __ T16B,
16428             as_FloatRegister($src1$$reg),
16429             as_FloatRegister($src2$$reg));
16430   %}
16431   ins_pipe(vlogical128);
16432 %}
16433 
16434 // ------------------------------ Shift ---------------------------------------
16435 
16436 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
16437   match(Set dst (LShiftCntV cnt));
16438   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
16439   ins_encode %{
16440     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16441   %}
16442   ins_pipe(vdup_reg_reg128);
16443 %}
16444 
16445 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
16446 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
16447   match(Set dst (RShiftCntV cnt));
16448   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
16449   ins_encode %{
16450     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16451     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
16452   %}
16453   ins_pipe(vdup_reg_reg128);
16454 %}
16455 
// Variable left/signed-right shift of 4B/8B byte vectors.  SSHL shifts left
// by positive lane counts and right by negative ones, so a single rule
// covers both LShiftVB and RShiftVB (vshiftcntR supplies negated counts).
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left/signed-right shift of a 16-byte vector (see vsll8B).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable unsigned (logical) right shift of 4B/8B byte vectors: USHL with
// the negated counts produced by vshiftcntR.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable unsigned (logical) right shift of a 16-byte vector.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16511 
// Immediate left shift of 4B/8B byte vectors.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    // Mask to 5 bits: Java masks int shift counts with & 31, and byte
    // operands are promoted to int before shifting.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Shifting an 8-bit lane left by 8 or more yields zero.  SHL cannot
      // encode such a shift, so clear dst with EOR dst, src, src instead.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift of a 16-byte vector (see vsll8B_imm for the
// sh >= 8 zeroing trick).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16550 
// Immediate arithmetic (signed) right shift of 4B/8B byte vectors.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    // An arithmetic right shift by >= lane width just replicates the sign
    // bit, so saturate at 7 (lane bits - 1).
    if (sh >= 8) sh = 7;
    // NOTE(review): the shift amount is passed in negated form, consistent
    // with every other sshr/ushr immediate user in this file -- confirm this
    // matches the macro assembler's SIMD shift-immediate convention.
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift of a 16-byte vector (see vsra8B_imm).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16581 
// Immediate logical (unsigned) right shift of 4B/8B byte vectors.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Logical right shift by >= lane width yields zero; clear dst with
      // EOR dst, src, src since USHR cannot encode such a shift.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // Shift passed in negated form (see vsra8B_imm).
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift of a 16-byte vector (see vsrl8B_imm).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16620 
// Variable left/signed-right shift of 2H/4H short vectors (SSHL handles
// both directions; right shifts use negated counts from vshiftcntR).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left/signed-right shift of an 8-short (128-bit) vector.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable unsigned right shift of 2H/4H short vectors.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable unsigned right shift of an 8-short (128-bit) vector.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16676 
// Immediate left shift of 2H/4H short vectors.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Java masks int shift counts with & 31; shorts are promoted to int.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Left shift by >= 16 clears every 16-bit lane; SHL cannot encode
      // that, so zero dst with EOR dst, src, src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift of an 8-short vector (see vsll4S_imm).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16715 
// Immediate arithmetic right shift of 2H/4H short vectors.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    // Saturate at 15 (lane bits - 1): larger arithmetic shifts just
    // replicate the sign bit.  Shift passed negated (see vsra8B_imm).
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift of an 8-short vector (see vsra4S_imm).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16746 
// Immediate logical right shift of 2H/4H short vectors.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Logical right shift by >= 16 clears every 16-bit lane.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // Shift passed in negated form (see vsra8B_imm).
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift of an 8-short vector (see vsrl4S_imm).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16785 
// Variable left/signed-right shift of a 2-int (64-bit) vector (SSHL handles
// both directions; right shifts use negated counts from vshiftcntR).
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable left/signed-right shift of a 4-int (128-bit) vector.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable unsigned right shift of a 2-int vector.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable unsigned right shift of a 4-int vector.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16839 
// Immediate left shift of a 2-int vector.  The & 31 mask matches Java's
// shift-count semantics for int, so no overflow case is needed here.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift of a 4-int vector.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16865 
// Immediate arithmetic right shift of a 2-int vector.  The shift is passed
// in negated form, consistent with the other sshr/ushr users in this file.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift of a 4-int vector (see vsra2I_imm).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16891 
// Immediate logical right shift of a 2-int vector (negated shift form).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate logical right shift of a 4-int vector.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16917 
// Variable left/signed-right shift of a 2-long (128-bit) vector (SSHL
// handles both directions; right shifts use negated counts).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable unsigned right shift of a 2-long vector.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16944 
// Immediate left shift of a 2-long vector.  The & 63 mask matches Java's
// shift-count semantics for long.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate arithmetic right shift of a 2-long vector (negated shift form,
// consistent with the other sshr/ushr users in this file).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}

// Immediate logical right shift of a 2-long vector (negated shift form).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16983 
16984 //----------PEEPHOLE RULES-----------------------------------------------------
16985 // These must follow all instruction definitions as they use the names
16986 // defined in the instructions definitions.
16987 //
16988 // peepmatch ( root_instr_name [preceding_instruction]* );
16989 //
16990 // peepconstraint %{
16991 // (instruction_number.operand_name relational_op instruction_number.operand_name
16992 //  [, ...] );
16993 // // instruction numbers are zero-based using left to right order in peepmatch
16994 //
16995 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16996 // // provide an instruction_number.operand_name for each operand that appears
16997 // // in the replacement instruction's match rule
16998 //
16999 // ---------VM FLAGS---------------------------------------------------------
17000 //
17001 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17002 //
17003 // Each peephole rule is given an identifying number starting with zero and
17004 // increasing by one in the order seen by the parser.  An individual peephole
17005 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17006 // on the command-line.
17007 //
17008 // ---------CURRENT LIMITATIONS----------------------------------------------
17009 //
17010 // Only match adjacent instructions in same basic block
17011 // Only equality constraints
17012 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17013 // Only one replacement instruction
17014 //
17015 // ---------EXAMPLE----------------------------------------------------------
17016 //
17017 // // pertinent parts of existing instructions in architecture description
17018 // instruct movI(iRegINoSp dst, iRegI src)
17019 // %{
17020 //   match(Set dst (CopyI src));
17021 // %}
17022 //
17023 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17024 // %{
17025 //   match(Set dst (AddI dst src));
17026 //   effect(KILL cr);
17027 // %}
17028 //
17029 // // Change (inc mov) to lea
17030 // peephole %{
//   // increment preceded by register-register move
17032 //   peepmatch ( incI_iReg movI );
17033 //   // require that the destination register of the increment
17034 //   // match the destination register of the move
17035 //   peepconstraint ( 0.dst == 1.dst );
17036 //   // construct a replacement instruction that sets
17037 //   // the destination to ( move's source register + one )
17038 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17039 // %}
17040 //
17041 
17042 // Implementation no longer uses movX instructions since
17043 // machine-independent system no longer uses CopyX nodes.
17044 //
17045 // peephole
17046 // %{
17047 //   peepmatch (incI_iReg movI);
17048 //   peepconstraint (0.dst == 1.dst);
17049 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17050 // %}
17051 
17052 // peephole
17053 // %{
17054 //   peepmatch (decI_iReg movI);
17055 //   peepconstraint (0.dst == 1.dst);
17056 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17057 // %}
17058 
17059 // peephole
17060 // %{
17061 //   peepmatch (addI_iReg_imm movI);
17062 //   peepconstraint (0.dst == 1.dst);
17063 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17064 // %}
17065 
17066 // peephole
17067 // %{
17068 //   peepmatch (incL_iReg movL);
17069 //   peepconstraint (0.dst == 1.dst);
17070 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17071 // %}
17072 
17073 // peephole
17074 // %{
17075 //   peepmatch (decL_iReg movL);
17076 //   peepconstraint (0.dst == 1.dst);
17077 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17078 // %}
17079 
17080 // peephole
17081 // %{
17082 //   peepmatch (addL_iReg_imm movL);
17083 //   peepconstraint (0.dst == 1.dst);
17084 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17085 // %}
17086 
17087 // peephole
17088 // %{
17089 //   peepmatch (addP_iReg_imm movP);
17090 //   peepconstraint (0.dst == 1.dst);
17091 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17092 // %}
17093 
17094 // // Change load of spilled value to only a spill
17095 // instruct storeI(memory mem, iRegI src)
17096 // %{
17097 //   match(Set mem (StoreI mem src));
17098 // %}
17099 //
17100 // instruct loadI(iRegINoSp dst, memory mem)
17101 // %{
17102 //   match(Set dst (LoadI mem));
17103 // %}
17104 //
17105 
17106 //----------SMARTSPILL RULES---------------------------------------------------
17107 // These must follow all instruction definitions as they use the names
17108 // defined in the instructions definitions.
17109 
17110 // Local Variables:
17111 // mode: c++
17112 // End: