//
// Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// AArch64 Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.

register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.
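//
// (A hedged reading of one definition below, for illustration:
//   reg_def R0 ( SOC, SOC, Op_RegI, 0, r0->as_VMReg() );
// declares R0 as save-on-call for both the allocator and the C
// convention, spilled as an int (Op_RegI), with opcode encoding 0,
// backed by the VM register underlying r0.)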

// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
//
// We follow the C1 compiler in classifying registers, as regards Java
// usage:
//
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// We don't use any callee save registers because this makes it
// difficult to de-optimise a frame (see comment in x86 implementation
// of Deoptimization::unwind_callee_save_values)
//

// General Registers

reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Double Registers

// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// AArch64 has 32 floating-point registers. Each is 128 bits wide and
// can store a vector of single or double precision floating-point
// values: up to 4 * 32 bit floats or 2 * 64 bit doubles. We currently
// only use the first float or double element of the vector.

// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
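//
// (Illustration of the definitions below: each 128-bit v register is
// described to the allocator as four 32-bit slots. For v0 these are
// V0 (v0->as_VMReg()), V0_H (->next()), V0_J (->next(2)) and V0_K
// (->next(3)), covering bits 0-31, 32-63, 64-95 and 96-127.)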

  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));

// ----------------------------
// Special Registers
// ----------------------------

// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).

reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());


// Specify priority of register selection within phases of register
// allocation.  Highest priority is first.  A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry.  Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.

alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);

alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

alloc_class chunk2(RFLAGS);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non-special integer registers
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});

// Class for all non-special long integer registers
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
    R31, R31_H
);

// Class for all pointer registers
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non_special pointer registers
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Class for all float registers
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 64bit vector registers
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);

// Class for all 128bit vector registers
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);

%}

//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
//        int_def  <name>         ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
//        #define  <name>   (<expression>)
//        // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//

// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. Huge cost appears to be a way of saying don't do
// something.

definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
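
// (A hedged illustration of the int_def format described above: for
// INSN_COST the generated ad_aarch64.hpp would contain
//   #define INSN_COST (100)
// and adlc_verification() would assert that INSN_COST == 100. This is
// inferred from the format description, not copied from generated
// code.)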


//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description

source_hpp %{

#include "gc/shared/cardTableModRefBS.hpp"
#include "opto/addnode.hpp"

class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};

class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
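    // (a hedged note: this budgets 4 instruction slots, i.e. the adr
    // plus up to 3 instructions for the far branch, on the assumption
    // that MacroAssembler::far_branch_size() never exceeds
    // 3 * NativeInstruction::instruction_size)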
    return 4 * NativeInstruction::instruction_size;
  }
};

  // graph traversal helpers

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  MemBarNode *leading_to_trailing(MemBarNode *leading);
  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
%}

source %{

  // Optimization of volatile gets and puts
  // -------------------------------------
  //
  // AArch64 has ldar<x> and stlr<x> instructions which we can safely
  // use to implement volatile reads and writes. For a volatile read
  // we simply need
  //
  //   ldar<x>
  //
  // and for a volatile write we need
  //
  //   stlr<x>
  //
  // Alternatively, we can implement them by pairing a normal
  // load/store with a memory barrier. For a volatile read we need
  //
  //   ldr<x>
  //   dmb ishld
  //
  // for a volatile write
  //
  //   dmb ish
  //   str<x>
  //   dmb ish
  //
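  // (A hedged illustration: for a volatile long read x = obj.f, with
  // the field address already computed into x1, the first scheme
  // emits the single instruction
  //
  //   ldar x0, [x1]
  //
  // in place of an ldr from [x1] followed by a dmb ishld.)
  //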
  // We can also use ldaxr and stlxr to implement compare and swap
  // (CAS) sequences. These are normally translated to an instruction
  // sequence like the following
  //
  //   dmb      ish
  // retry:
  //   ldxr<x>   rval, [raddr]
  //   cmp       rval, rold
  //   b.ne done
  //   stlxr<x>  rval, rnew, [raddr]
  //   cbnz      rval, retry
  // done:
  //   cset      r0, eq
  //   dmb ishld
  //
  // Note that the exclusive store is already using an stlxr
  // instruction. That is required to ensure visibility to other
  // threads of the exclusive write (assuming it succeeds) before that
  // of any subsequent writes.
  //
  // The following instruction sequence is an improvement on the above
  //
  // retry:
  //   ldaxr<x>  rval, [raddr]
  //   cmp       rval, rold
  //   b.ne done
  //   stlxr<x>  rval, rnew, [raddr]
  //   cbnz      rval, retry
  // done:
  //   cset      r0, eq
  //
  // We don't need the leading dmb ish since the stlxr guarantees
  // visibility of prior writes in the case that the swap is
  // successful. Crucially we don't have to worry about the case where
  // the swap is not successful since no valid program should be
  // relying on visibility of prior changes by the attempting thread
  // in the case where the CAS fails.
  //
  // Similarly, we don't need the trailing dmb ishld if we substitute
  // an ldaxr instruction since that will provide all the guarantees we
  // require regarding observation of changes made by other threads
  // before any change to the CAS address observed by the load.
  //
  // In order to generate the desired instruction sequence we need to
  // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
  // writes or CAS operations and ii) do not occur through any other
  // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
  // sequences to the desired machine code sequences. Selection of the
  // alternative rules can be implemented by predicates which identify
  // the relevant node sequences.
  //
  // The ideal graph generator translates a volatile read to the node
  // sequence
  //
  //   LoadX[mo_acquire]
  //   MemBarAcquire
  //
  // As a special case when using the compressed oops optimization we
  // may also see this variant
  //
  //   LoadN[mo_acquire]
  //   DecodeN
  //   MemBarAcquire
  //
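  // (A hedged illustration: a Java-level read of a volatile int
  // field, say int x = obj.f with f declared volatile, is what
  // produces the LoadI[mo_acquire]/MemBarAcquire pair above.)
  //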
  // A volatile write is translated to the node sequence
  //
  //   MemBarRelease
  //   StoreX[mo_release] {CardMark}-optional
  //   MemBarVolatile
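  //
  // (Correspondingly, a Java-level write to a volatile field, such as
  // obj.f = x, produces this release store bracketed by the leading
  // and trailing membars.)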
  //
  // n.b. the above node patterns are generated with a strict
  // 'signature' configuration of input and output dependencies (see
  // the predicates below for exact details). The card mark may be as
  // simple as a few extra nodes or, in a few GC configurations, may
  // include more complex control flow between the leading and
  // trailing memory barriers. However, whatever the card mark
  // configuration these signatures are unique to translated volatile
  // reads/stores -- they will not appear as a result of any other
  // bytecode translation or inlining nor as a consequence of
  // optimizing transforms.
  //
  // We also want to catch inlined unsafe volatile gets and puts and
  // be able to implement them using either ldar<x>/stlr<x> or some
  // combination of ldr<x>/str<x> and dmb instructions.
  //
  // Inlined unsafe volatile puts manifest as a minor variant of the
  // normal volatile put node sequence containing an extra cpuorder
  // membar
  //
  //   MemBarRelease
  //   MemBarCPUOrder
  //   StoreX[mo_release] {CardMark}-optional
  //   MemBarVolatile
  //
  // n.b. as an aside, the cpuorder membar is not itself subject to
  // matching and translation by adlc rules.  However, the rule
  // predicates need to detect its presence in order to correctly
  // select the desired adlc rules.
  //
  // Inlined unsafe volatile gets manifest as a somewhat different
  // node sequence to a normal volatile get
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // In this case the acquire membar does not directly depend on the
  // load. However, we can be sure that the load is generated from an
  // inlined unsafe volatile get if we see it dependent on this unique
  // sequence of membar nodes. Similarly, given an acquire membar we
  // can know that it was added because of an inlined unsafe volatile
  // get if it is fed and feeds a cpuorder membar and if its feed
  // membar also feeds an acquiring load.
  //
  // Finally an inlined (Unsafe) CAS operation is translated to the
  // following ideal graph
  //
  //   MemBarRelease
  //   MemBarCPUOrder
  //   CompareAndSwapX {CardMark}-optional
  //   MemBarCPUOrder
  //   MemBarAcquire
  //
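  // (A hedged illustration: an inlined sun.misc.Unsafe
  // compareAndSwapInt(obj, offset, expect, update) call is the kind
  // of operation that produces the CompareAndSwapI form of this
  // graph.)
  //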
  // So, where we can identify these volatile read and write
  // signatures we can choose to plant either of the above two code
  // sequences. For a volatile read we can simply plant a normal
  // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
  // also choose to inhibit translation of the MemBarAcquire and
  // inhibit planting of the ldr<x>, instead planting an ldar<x>.
  //
  // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
  // normal str<x> and then a dmb ish for the MemBarVolatile.
  // Alternatively, we can inhibit translation of the MemBarRelease
  // and MemBarVolatile and instead plant a simple stlr<x>
  // instruction.
  //
  // When we recognise a CAS signature we can choose to plant a dmb
  // ish as a translation for the MemBarRelease, the conventional
  // macro-instruction sequence for the CompareAndSwap node (which
  // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
  // Alternatively, we can elide generation of the dmb instructions
  // and plant the alternative CompareAndSwap macro-instruction
  // sequence (which uses ldaxr<x>).
  //
  // Of course, the above only applies when we see these signature
  // configurations. We still want to plant dmb instructions in any
  // other cases where we may see a MemBarAcquire, MemBarRelease or
  // MemBarVolatile. For example, at the end of a constructor which
  // writes final/volatile fields we will see a MemBarRelease
  // instruction and this needs a 'dmb ish' lest we risk the
  // constructed object being visible without making the
  // final/volatile field writes visible.
  //
  // n.b. the translation rules below which rely on detection of the
  // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
  // If we see anything other than the signature configurations we
  // always just translate the loads and stores to ldr<x> and str<x>
  // and translate acquire, release and volatile membars to the
  // relevant dmb instructions.
  //

  // graph traversal helpers used for volatile put/get and CAS
  // optimization

  // 1) general purpose helpers

  // if node n is linked to a parent MemBarNode by an intervening
  // Control and Memory ProjNode return the MemBarNode otherwise return
  // NULL.
  //
  // n may only be a Load or a MemBar.

  MemBarNode *parent_membar(const Node *n)
  {
    Node *ctl = NULL;
    Node *mem = NULL;
    Node *membar = NULL;

    if (n->is_Load()) {
      ctl = n->lookup(LoadNode::Control);
      mem = n->lookup(LoadNode::Memory);
    } else if (n->is_MemBar()) {
      ctl = n->lookup(TypeFunc::Control);
      mem = n->lookup(TypeFunc::Memory);
    } else {
      return NULL;
    }

    if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
      return NULL;
    }

    membar = ctl->lookup(0);

    if (!membar || !membar->is_MemBar()) {
      return NULL;
    }

    if (mem->lookup(0) != membar) {
      return NULL;
    }

    return membar->as_MemBar();
  }

  // if n is linked to a child MemBarNode by intervening Control and
  // Memory ProjNodes return the MemBarNode otherwise return NULL.

  MemBarNode *child_membar(const MemBarNode *n)
  {
    ProjNode *ctl = n->proj_out(TypeFunc::Control);
    ProjNode *mem = n->proj_out(TypeFunc::Memory);

    // MemBar needs to have both a Ctl and Mem projection
    if (! ctl || ! mem)
      return NULL;

    MemBarNode *child = NULL;
    Node *x;

    for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
      x = ctl->fast_out(i);
      // if we see a membar we keep hold of it. we may also see a new
      // arena copy of the original but it will appear later
      if (x->is_MemBar()) {
        child = x->as_MemBar();
        break;
      }
    }

    if (child == NULL) {
      return NULL;
    }

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see a membar we keep hold of it. we may also see a new
      // arena copy of the original but it will appear later
      if (x == child) {
        return child;
      }
    }
    return NULL;
  }

  // helper predicate used to filter candidates for a leading memory
  // barrier
  //
  // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
  // whose Ctl and Mem feeds come from a MemBarRelease otherwise false

  bool leading_membar(const MemBarNode *barrier)
  {
    int opcode = barrier->Opcode();
    // if this is a release membar we are ok
    if (opcode == Op_MemBarRelease) {
      return true;
    }
    // if it's a cpuorder membar . . .
    if (opcode != Op_MemBarCPUOrder) {
      return false;
    }
    // then the parent has to be a release membar
    MemBarNode *parent = parent_membar(barrier);
    if (!parent) {
      return false;
    }
    opcode = parent->Opcode();
    return opcode == Op_MemBarRelease;
  }

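  // (Usage sketch, an assumption for illustration rather than code
  // taken from the predicates below: a candidate barrier found during
  // matching can be checked with
  //
  //   MemBarNode *parent = parent_membar(candidate);
  //   bool ok = (parent != NULL) && leading_membar(parent);
  //
  // before the rest of the subgraph is validated.)
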
  // 2) card mark detection helper

  // helper predicate which can be used to detect a volatile membar
  // introduced as part of a conditional card mark sequence either by
  // G1 or by CMS when UseCondCardMark is true.
  //
  // membar can be definitively determined to be part of a card mark
  // sequence if and only if all the following hold
  //
  // i) it is a MemBarVolatile
  //
  // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
  // true
  //
  // iii) the node's Mem projection feeds a StoreCM node.

  bool is_card_mark_membar(const MemBarNode *barrier)
  {
    if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
      return false;
    }

    if (barrier->Opcode() != Op_MemBarVolatile) {
      return false;
    }

    ProjNode *mem = barrier->proj_out(TypeFunc::Memory);

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
      Node *y = mem->fast_out(i);
      if (y->Opcode() == Op_StoreCM) {
        return true;
      }
    }

    return false;
  }


  // 3) helper predicates to traverse volatile put or CAS graphs which
  // may contain GC barrier subgraphs

  // Preamble
  // --------
  //
  // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a leading
  // MemBarRelease and a trailing MemBarVolatile as follows
  //
  //   MemBarRelease
  //  {    ||        } -- optional
  //  {MemBarCPUOrder}
  //       ||       \\
  //       ||     StoreX[mo_release]
  //       | \ Bot    / ???
  //       | MergeMem
  //       | /
  //   MemBarVolatile
  //
  // where
  //  || and \\ represent Ctl and Mem feeds via Proj nodes
  //  | \ and / indicate further routing of the Ctl and Mem feeds
  //
  // Note that the memory feed from the CPUOrder membar to the
  // MergeMem node is an AliasIdxBot slice while the feed from the
  // StoreX is for a slice determined by the type of value being
  // written.
  //
  // the diagram above shows the graph we see for non-object stores.
  // for a volatile Object store (StoreN/P) we may see other nodes
  // below the leading membar because of the need for a GC pre- or
  // post-write barrier.
  //
  // with most GC configurations we will see this simple variant which
  // includes a post-write barrier card mark.
  //
  //   MemBarRelease______________________________
  //         ||    \\               Ctl \        \\
  //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
  //         | \ Bot  / oop                 . . .  /
  //         | MergeMem
  //         | /
  //         ||      /
  //   MemBarVolatile
  //
  // i.e. the leading membar feeds Ctl to a CastP2X (which converts
  // the object address to an int used to compute the card offset) and
  // Ctl+Mem to a StoreB node (which does the actual card mark).
  //
  // n.b. a StoreCM node is only ever used when CMS (with or without
  // CondCardMark) or G1 is configured. This abstract instruction
  // differs from a normal card mark write (StoreB) because it implies
  // a requirement to order visibility of the card mark (StoreCM)
  // after that of the object put (StoreP/N) using a StoreStore memory
  // barrier. Note that this is /not/ a requirement to order the
  // instructions in the generated code (that is already guaranteed by
  // the order of memory dependencies). Rather it is a requirement to
  // ensure visibility order which only applies on architectures like
  // AArch64 which do not implement TSO. This ordering is required for
  // both non-volatile and volatile puts.
  //
  // That implies that we need to translate a StoreCM using the
  // sequence
  //
  //   dmb ishst
  //   stlrb
  //
  // This dmb cannot be omitted even when the associated StoreX or
  // CompareAndSwapX is implemented using stlr. However, as described
  // below there are circumstances where a specific GC configuration
  // requires a stronger barrier in which case it can be omitted.
  //
  // With the Serial or Parallel GC using +CondCardMark the card mark
  // is performed conditionally on it currently being unmarked in
  // which case the volatile put graph looks slightly different
  //
  //   MemBarRelease____________________________________________
  //         ||    \\               Ctl \     Ctl \     \\  Mem \
  //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
  //         | \ Bot / oop                          \            |
  //         | MergeMem                            . . .      StoreB
  //         | /                                                /
  //         ||     /
  //   MemBarVolatile
  //
  // It is worth noting at this stage that all the above
  // configurations can be uniquely identified by checking that the
  // memory flow includes the following subgraph:
  //
  //   MemBarRelease
  //  {MemBarCPUOrder}
  //      |  \      . . .
  //      |  StoreX[mo_release]  . . .
  //  Bot |   / oop
  //     MergeMem
  //      |
  //   MemBarVolatile
  //
  // This is referred to as a *normal* volatile store subgraph. It can
  // easily be detected starting from any candidate MemBarRelease,
  // StoreX[mo_release] or MemBarVolatile node.
  //
  // A small variation on this normal case occurs for an unsafe CAS
  // operation. The basic memory flow subgraph for a non-object CAS is
  // as follows
  //
  //   MemBarRelease
  //         ||
  //   MemBarCPUOrder
  //          |     \\   . . .
  //          |     CompareAndSwapX
  //          |       |
  //      Bot |     SCMemProj
  //           \     / Bot
  //           MergeMem
  //           /
  //   MemBarCPUOrder
  //         ||
  //   MemBarAcquire
  //
  // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. the CPUOrder MemBar
  // feeds the extra CastP2X, LoadB etc nodes but the above memory
  // flow subgraph is still present.
  //
  // This is referred to as a *normal* CAS subgraph. It can easily be
  // detected starting from any candidate MemBarRelease,
  // CompareAndSwapX or MemBarAcquire node.
1541   //
  // The code below uses two helper predicates, leading_to_trailing
  // and trailing_to_leading, to identify these normal graphs, one
  // validating the layout starting from the top membar and searching
  // down and the other validating the layout starting from the lower
  // membar and searching up.
1547   //
  // There are two special case GC configurations when the simple
  // normal graphs above may not be generated: when using G1 (which
  // always employs a conditional card mark); and when using CMS with
  // conditional card marking (+CondCardMark) configured. These GCs
  // are both concurrent rather than stop-the-world GCs. So they
  // introduce extra Ctl+Mem flow into the graph between the leading
  // and trailing membar nodes, in particular enforcing stronger
  // memory serialisation between the object put and the corresponding
  // conditional card mark. CMS employs a post-write GC barrier while
  // G1 employs both a pre- and post-write GC barrier.
1558   //
  // The post-write barrier subgraph for these configurations includes
  // a MemBarVolatile node -- referred to as a card mark membar --
  // which is needed to order the card write (StoreCM) operation in
  // the barrier relative both to the preceding StoreX (or
  // CompareAndSwapX) and to Store operations performed by GC threads,
  // i.e. a card mark membar constitutes a StoreLoad barrier and hence
  // must be translated to a dmb ish (whether or not it sits inside a
  // volatile store sequence).
1566   //
  // Of course, the use of the dmb ish for the card mark membar also
  // implies that the StoreCM which follows can omit the dmb ishst
  // instruction. The necessary visibility ordering will already be
  // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
  // needs to be generated as part of the StoreCM sequence with GC
  // configuration +CMS -CondCardMark.
1573   // 
1574   // Of course all these extra barrier nodes may well be absent --
1575   // they are only inserted for object puts. Their potential presence
1576   // significantly complicates the task of identifying whether a
1577   // MemBarRelease, StoreX[mo_release], MemBarVolatile or
1578   // MemBarAcquire forms part of a volatile put or CAS when using
1579   // these GC configurations (see below) and also complicates the
1580   // decision as to how to translate a MemBarVolatile and StoreCM.
1581   //
  // So, this means that a card mark MemBarVolatile occurring in the
  // post-barrier graph needs to be distinguished from a normal
  // trailing MemBarVolatile. Resolving this is straightforward: a
  // card mark MemBarVolatile always projects a Mem feed to a StoreCM
  // node and that is a unique marker.
1587   //
1588   //      MemBarVolatile (card mark)
1589   //       C |    \     . . .
1590   //         |   StoreCM   . . .
1591   //       . . .
1592   //
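  // A minimal sketch of such a check (the helper is_card_mark_membar
  // used below is expected to perform essentially this scan, plus
  // whatever GC configuration tests apply) just looks for a StoreCM
  // among the users of the membar's Mem projection:
  //
  //   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
  //   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
  //     if (mem->fast_out(i)->Opcode() == Op_StoreCM) {
  //       return true; // this MemBarVolatile is a card mark membar
  //     }
  //   }
  //   return false;
  //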
  // Returning to the task of translating the object put and the
  // leading/trailing membar nodes: what do the node graphs look like
  // for these 2 special cases? And how can we determine the status of
  // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
  // normal and non-normal cases?
1598   //
  // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
  // (LoadB) from the card. Ctl and Mem are fed to the If via an
  // intervening StoreLoad barrier (MemBarVolatile).
1603   //
1604   // So, with CMS we may see a node graph for a volatile object store
1605   // which looks like this
1606   //
1607   //   MemBarRelease
1608   //   MemBarCPUOrder_(leading)____________________
1609   //     C |  | M \       \\               M |   C \
1610   //       |  |    \    StoreN/P[mo_release] |  CastP2X
1611   //       |  | Bot \    / oop      \        |
1612   //       |  |    MergeMem          \      / 
1613   //       |  |      /                |    /
1614   //     MemBarVolatile (card mark)   |   /
1615   //     C |  ||    M |               |  /
1616   //       | LoadB    | Bot       oop | / Bot
1617   //       |   |      |              / /
1618   //       | Cmp      |\            / /
1619   //       | /        | \          / /
1620   //       If         |  \        / /
1621   //       | \        |   \      / /
1622   // IfFalse  IfTrue  |    \    / /
1623   //       \     / \  |    |   / /
1624   //        \   / StoreCM  |  / /
1625   //         \ /      \   /  / /
1626   //        Region     Phi  / /
1627   //          | \   Raw |  / /
1628   //          |  . . .  | / /
1629   //          |       MergeMem
1630   //          |           |
1631   //        MemBarVolatile (trailing)
1632   //
1633   // Notice that there are two MergeMem nodes below the leading
1634   // membar. The first MergeMem merges the AliasIdxBot Mem slice from
1635   // the leading membar and the oopptr Mem slice from the Store into
1636   // the card mark membar. The trailing MergeMem merges the
1637   // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
1638   // slice from the StoreCM and an oop slice from the StoreN/P node
1639   // into the trailing membar (n.b. the raw slice proceeds via a Phi
1640   // associated with the If region).
1641   //
1642   // So, in the case of CMS + CondCardMark the volatile object store
1643   // graph still includes a normal volatile store subgraph from the
1644   // leading membar to the trailing membar. However, it also contains
1645   // the same shape memory flow to the card mark membar. The two flows
1646   // can be distinguished by testing whether or not the downstream
1647   // membar is a card mark membar.
1648   //
1649   // The graph for a CAS also varies with CMS + CondCardMark, in
1650   // particular employing a control feed from the CompareAndSwapX node
1651   // through a CmpI and If to the card mark membar and StoreCM which
1652   // updates the associated card. This avoids executing the card mark
1653   // if the CAS fails. However, it can be seen from the diagram below
1654   // that the presence of the barrier does not alter the normal CAS
1655   // memory subgraph where the leading membar feeds a CompareAndSwapX,
1656   // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
1657   // MemBarAcquire pair.
1658   //
1659   //   MemBarRelease
1660   //   MemBarCPUOrder__(leading)_______________________
1661   //   C /  M |                        \\            C \
1662   //  . . .   | Bot                CompareAndSwapN/P   CastP2X
1663   //          |                  C /  M |
1664   //          |                 CmpI    |
1665   //          |                  /      |
1666   //          |               . . .     |
1667   //          |              IfTrue     |
1668   //          |              /          |
1669   //       MemBarVolatile (card mark)   |
1670   //        C |  ||    M |              |
1671   //          | LoadB    | Bot   ______/|
1672   //          |   |      |      /       |
1673   //          | Cmp      |     /      SCMemProj
1674   //          | /        |    /         |
1675   //          If         |   /         /
1676   //          | \        |  /         / Bot
1677   //     IfFalse  IfTrue | /         /
1678   //          |   / \   / / prec    /
1679   //   . . .  |  /  StoreCM        /
1680   //        \ | /      | raw      /
1681   //        Region    . . .      /
1682   //           | \              /
1683   //           |   . . .   \    / Bot
1684   //           |        MergeMem
1685   //           |          /
1686   //         MemBarCPUOrder
1687   //         MemBarAcquire (trailing)
1688   //
1689   // This has a slightly different memory subgraph to the one seen
1690   // previously but the core of it has a similar memory flow to the
1691   // CAS normal subgraph:
1692   //
1693   //   MemBarRelease
1694   //   MemBarCPUOrder____
1695   //         |          \      . . .
1696   //         |       CompareAndSwapX  . . .
1697   //         |       C /  M |
1698   //         |      CmpI    |
1699   //         |       /      |
1700   //         |      . .    /
1701   //     Bot |   IfTrue   /
1702   //         |   /       /
1703   //    MemBarVolatile  /
1704   //         | ...     /
1705   //      StoreCM ... /
1706   //         |       / 
1707   //       . . .  SCMemProj
1708   //      Raw \    / Bot
1709   //        MergeMem
1710   //           |
1711   //   MemBarCPUOrder
1712   //   MemBarAcquire
1713   //
1714   // The G1 graph for a volatile object put is a lot more complicated.
1715   // Nodes inserted on behalf of G1 may comprise: a pre-write graph
1716   // which adds the old value to the SATB queue; the releasing store
1717   // itself; and, finally, a post-write graph which performs a card
1718   // mark.
1719   //
1720   // The pre-write graph may be omitted, but only when the put is
1721   // writing to a newly allocated (young gen) object and then only if
1722   // there is a direct memory chain to the Initialize node for the
1723   // object allocation. This will not happen for a volatile put since
1724   // any memory chain passes through the leading membar.
1725   //
1726   // The pre-write graph includes a series of 3 If tests. The outermost
1727   // If tests whether SATB is enabled (no else case). The next If tests
1728   // whether the old value is non-NULL (no else case). The third tests
1729   // whether the SATB queue index is > 0, if so updating the queue. The
1730   // else case for this third If calls out to the runtime to allocate a
1731   // new queue buffer.
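  //
  // In pseudo-code (an illustrative rendering of those nested tests,
  // not code from this file) the pre-write barrier amounts to
  //
  //   if (marking_active) {            // outermost If
  //     pre_val = *field;
  //     if (pre_val != NULL) {         // second If
  //       if (index != 0) {            // third If
  //         queue[--index] = pre_val;  // update the SATB queue
  //       } else {
  //         runtime_call(enqueue_pre_val);  // allocate a new buffer
  //       }
  //     }
  //   }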
1732   //
1733   // So with G1 the pre-write and releasing store subgraph looks like
1734   // this (the nested Ifs are omitted).
1735   //
1736   //  MemBarRelease (leading)____________
1737   //     C |  ||  M \   M \    M \  M \ . . .
1738   //       | LoadB   \  LoadL  LoadN   \
1739   //       | /        \                 \
1740   //       If         |\                 \
1741   //       | \        | \                 \
1742   //  IfFalse  IfTrue |  \                 \
1743   //       |     |    |   \                 |
1744   //       |     If   |   /\                |
1745   //       |     |          \               |
1746   //       |                 \              |
1747   //       |    . . .         \             |
1748   //       | /       | /       |            |
1749   //      Region  Phi[M]       |            |
1750   //       | \       |         |            |
1751   //       |  \_____ | ___     |            |
1752   //     C | C \     |   C \ M |            |
1753   //       | CastP2X | StoreN/P[mo_release] |
1754   //       |         |         |            |
1755   //     C |       M |       M |          M |
1756   //        \        | Raw     | oop       / Bot
1757   //                  . . .
1758   //          (post write subtree elided)
1759   //                    . . .
1760   //             C \         M /
1761   //         MemBarVolatile (trailing)
1762   //
  // Note that the three memory feeds into the post-write tree are an
  // AliasIdxRaw slice associated with the writes in the pre-write
  // tree, an oop type slice from the StoreX specific to the type of
  // the volatile field and the AliasIdxBot slice emanating from the
  // leading membar.
1768   //
1769   // n.b. the LoadB in this subgraph is not the card read -- it's a
1770   // read of the SATB queue active flag.
1771   //
  // The CAS graph is once again a variant of the above with a
  // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
  // value from the CompareAndSwapX node is fed into the post-write
  // graph along with the AliasIdxRaw feed from the pre-barrier and
  // the AliasIdxBot feeds from the leading membar and the SCMemProj.
1777   //
1778   //  MemBarRelease (leading)____________
1779   //     C |  ||  M \   M \    M \  M \ . . .
1780   //       | LoadB   \  LoadL  LoadN   \
1781   //       | /        \                 \
1782   //       If         |\                 \
1783   //       | \        | \                 \
1784   //  IfFalse  IfTrue |  \                 \
1785   //       |     |    |   \                 \
1786   //       |     If   |    \                 |
1787   //       |     |          \                |
1788   //       |                 \               |
1789   //       |    . . .         \              |
1790   //       | /       | /       \             |
1791   //      Region  Phi[M]        \            |
1792   //       | \       |           \           |
1793   //       |  \_____ |            |          |
1794   //     C | C \     |            |          |
1795   //       | CastP2X |     CompareAndSwapX   |
1796   //       |         |   res |     |         |
1797   //     C |       M |       |  SCMemProj  M |
1798   //        \        | Raw   |     | Bot    / Bot
1799   //                  . . .
1800   //          (post write subtree elided)
1801   //                    . . .
1802   //             C \         M /
1803   //         MemBarVolatile (trailing)
1804   //
  // The G1 post-write subtree is also optional, this time when the
  // new value being written is either null or can be identified as a
  // newly allocated (young gen) object with no intervening control
  // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged directly into the
  // trailing membar as per the normal subgraph. So, the only special
  // case which arises is when the post-write subgraph is generated.
1813   //
1814   // The kernel of the post-write G1 subgraph is the card mark itself
1815   // which includes a card mark memory barrier (MemBarVolatile), a
1816   // card test (LoadB), and a conditional update (If feeding a
1817   // StoreCM). These nodes are surrounded by a series of nested Ifs
1818   // which try to avoid doing the card mark. The top level If skips if
1819   // the object reference does not cross regions (i.e. it tests if
1820   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1821   // need not be recorded. The next If, which skips on a NULL value,
1822   // may be absent (it is not generated if the type of value is >=
1823   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1824   // checking if card_val != young).  n.b. although this test requires
1825   // a pre-read of the card it can safely be done before the StoreLoad
1826   // barrier. However that does not bypass the need to reread the card
1827   // after the barrier.
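  //
  // In pseudo-code (illustrative only) the post-write barrier
  // performs
  //
  //   if (((adr ^ val) >> log2(regsize)) != 0) { // cross-region?
  //     if (val != NULL) {                       // may be omitted
  //       if (*card != young) {                  // pre-read of card
  //         StoreLoad barrier;                   // the card mark membar
  //         if (*card != dirty) {                // re-read after barrier
  //           *card = dirty;                     // the StoreCM
  //           . . .                              // enqueue the card
  //         }
  //       }
  //     }
  //   }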
1828   //
1829   //                (pre-write subtree elided)
1830   //        . . .                  . . .    . . .  . . .
1831   //        C |               M |    M |    M |
1832   //       Region            Phi[M] StoreN    |
1833   //          |            Raw  |  oop |  Bot |
1834   //         / \_______         |\     |\     |\
1835   //      C / C \      . . .    | \    | \    | \
1836   //       If   CastP2X . . .   |  \   |  \   |  \
1837   //       / \                  |   \  |   \  |   \
1838   //      /   \                 |    \ |    \ |    \
1839   // IfFalse IfTrue             |      |      |     \
1840   //   |       |                 \     |     /       |
1841   //   |       If                 \    | \  /   \    |
1842   //   |      / \                  \   |   /     \   |
1843   //   |     /   \                  \  |  / \     |  |
1844   //   | IfFalse IfTrue           MergeMem   \    |  |
1845   //   |  . . .    / \                 |      \   |  |
1846   //   |          /   \                |       |  |  |
1847   //   |     IfFalse IfTrue            |       |  |  |
1848   //   |      . . .    |               |       |  |  |
1849   //   |               If             /        |  |  |
1850   //   |               / \           /         |  |  |
1851   //   |              /   \         /          |  |  |
1852   //   |         IfFalse IfTrue    /           |  |  |
1853   //   |           . . .   |      /            |  |  |
1854   //   |                    \    /             |  |  |
1855   //   |                     \  /              |  |  |
  //   |         MemBarVolatile__(card mark)   |  |  |
1857   //   |              ||   C |     \           |  |  |
1858   //   |             LoadB   If     |         /   |  |
1859   //   |                    / \ Raw |        /   /  /
1860   //   |                   . . .    |       /   /  /
1861   //   |                        \   |      /   /  /
1862   //   |                        StoreCM   /   /  /
1863   //   |                           |     /   /  /
1864   //   |                            . . .   /  /
1865   //   |                                   /  /
1866   //   |   . . .                          /  /
1867   //   |    |             | /            /  /
1868   //   |    |           Phi[M] /        /  /
1869   //   |    |             |   /        /  /
1870   //   |    |             |  /        /  /
1871   //   |  Region  . . .  Phi[M]      /  /
1872   //   |    |             |         /  /
1873   //    \   |             |        /  /
1874   //     \  | . . .       |       /  /
1875   //      \ |             |      /  /
1876   //      Region         Phi[M] /  /
1877   //        |               \  /  /
1878   //         \             MergeMem
1879   //          \            /
1880   //          MemBarVolatile
1881   //
  // As with CMS + CondCardMark the first MergeMem merges the
  // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
  // slice from the Store into the card mark membar. However, in this
  // case it may also merge an AliasIdxRaw Mem slice from the
  // pre-barrier write.
1887   //
  // The trailing MergeMem merges an AliasIdxBot Mem slice from the
  // leading membar with an oop slice from the StoreN and an
  // AliasIdxRaw slice from the post-barrier writes. In this case the
  // AliasIdxRaw Mem slice is merged through a series of Phi nodes
  // which combine feeds from the If regions in the post-barrier
  // subgraph.
1894   //
1895   // So, for G1 the same characteristic subgraph arises as for CMS +
1896   // CondCardMark. There is a normal subgraph feeding the card mark
1897   // membar and a normal subgraph feeding the trailing membar.
1898   //
1899   // The CAS graph when using G1GC also includes an optional
1900   // post-write subgraph. It is very similar to the above graph except
1901   // for a few details.
1902   // 
  // - The control flow is gated by an additional If which tests the
  // result from the CompareAndSwapX node
1905   // 
1906   //  - The MergeMem which feeds the card mark membar only merges the
1907   // AliasIdxBot slice from the leading membar and the AliasIdxRaw
1908   // slice from the pre-barrier. It does not merge the SCMemProj
1909   // AliasIdxBot slice. So, this subgraph does not look like the
1910   // normal CAS subgraph.
1911   //
1912   // - The MergeMem which feeds the trailing membar merges the
1913   // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
1914   // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
1915   // has two AliasIdxBot input slices. However, this subgraph does
1916   // still look like the normal CAS subgraph.
1917   //
1918   // So, the upshot is:
1919   //
  // In all cases a volatile put graph will include a *normal*
  // volatile store subgraph between the leading membar and the
  // trailing membar. It may also include a normal volatile store
  // subgraph between the leading membar and the card mark membar.
1924   //
1925   // In all cases a CAS graph will contain a unique normal CAS graph
1926   // feeding the trailing membar.
1927   //
1928   // In all cases where there is a card mark membar (either as part of
1929   // a volatile object put or CAS) it will be fed by a MergeMem whose
1930   // AliasIdxBot slice feed will be a leading membar.
1931   //
1932   // The predicates controlling generation of instructions for store
1933   // and barrier nodes employ a few simple helper functions (described
1934   // below) which identify the presence or absence of all these
1935   // subgraph configurations and provide a means of traversing from
1936   // one node in the subgraph to another.
1937 
1938   // is_CAS(int opcode)
1939   //
1940   // return true if opcode is one of the possible CompareAndSwapX
1941   // values otherwise false.
1942 
1943   bool is_CAS(int opcode)
1944   {
1945     return (opcode == Op_CompareAndSwapI ||
1946             opcode == Op_CompareAndSwapL ||
1947             opcode == Op_CompareAndSwapN ||
1948             opcode == Op_CompareAndSwapP);
1949   }
1950 
1951   // leading_to_trailing
1952   //
  // graph traversal helper which detects the normal case Mem feed
  // from a release membar (or, optionally, its cpuorder child) to a
  // dependent volatile membar i.e. it ensures that one or other of
  // the following Mem flow subgraphs is present.
1957   //
1958   //   MemBarRelease {leading}
1959   //   {MemBarCPUOrder} {optional}
1960   //     Bot |  \      . . .
1961   //         |  StoreN/P[mo_release]  . . .
1962   //         |   /
1963   //        MergeMem
1964   //         |
1965   //   MemBarVolatile {not card mark}
1966   //
1967   //   MemBarRelease {leading}
1968   //   {MemBarCPUOrder} {optional}
1969   //      |       \      . . .
1970   //      |     CompareAndSwapX  . . .
1971   //               |
1972   //     . . .    SCMemProj
1973   //           \   |
1974   //      |    MergeMem
1975   //      |       /
1976   //    MemBarCPUOrder
1977   //    MemBarAcquire {trailing}
1978   //
  // the predicate needs to be capable of distinguishing the following
  // volatile put graph which may arise when a GC post barrier
  // inserts a card mark membar
1982   //
1983   //   MemBarRelease {leading}
1984   //   {MemBarCPUOrder}__
1985   //     Bot |   \       \
1986   //         |   StoreN/P \
1987   //         |    / \     |
1988   //        MergeMem \    |
1989   //         |        \   |
1990   //   MemBarVolatile  \  |
1991   //    {card mark}     \ |
1992   //                  MergeMem
1993   //                      |
1994   // {not card mark} MemBarVolatile
1995   //
  // if the correct configuration is present this returns the
  // trailing membar, otherwise NULL.
1998   //
  // the input membar is expected to be either a cpuorder membar or a
  // release membar. in the latter case it should not have a cpuorder
  // membar child.
2002   //
2003   // the returned value may be a card mark or trailing membar
2004   //
2005 
2006   MemBarNode *leading_to_trailing(MemBarNode *leading)
2007   {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a release or cpuorder membar!");
2011 
2012     // check the mem flow
2013     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2014 
2015     if (!mem) {
2016       return NULL;
2017     }
2018 
2019     Node *x = NULL;
    StoreNode *st = NULL;
2021     LoadStoreNode *cas = NULL;
2022     MergeMemNode *mm = NULL;
2023     MergeMemNode *mm2 = NULL;
2024 
2025     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2026       x = mem->fast_out(i);
2027       if (x->is_MergeMem()) {
2028         if (mm != NULL) {
2029           if (mm2 != NULL) {
            // should not see more than 2 merge mems
2031             return NULL;
2032           } else {
2033             mm2 = x->as_MergeMem();
2034           }
2035         } else {
2036           mm = x->as_MergeMem();
2037         }
2038       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2039         // two releasing stores/CAS nodes is one too many
2040         if (st != NULL || cas != NULL) {
2041           return NULL;
2042         }
2043         st = x->as_Store();
2044       } else if (is_CAS(x->Opcode())) {
2045         if (st != NULL || cas != NULL) {
2046           return NULL;
2047         }
2048         cas = x->as_LoadStore();
2049       }
2050     }
2051 
2052     // must have a store or a cas
2053     if (!st && !cas) {
2054       return NULL;
2055     }
2056 
2057     // must have at least one merge if we also have st
2058     if (st && !mm) {
2059       return NULL;
2060     }
2061 
2062     if (cas) {
2063       Node *y = NULL;
2064       // look for an SCMemProj
2065       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
2066         x = cas->fast_out(i);
2067         if (x->is_Proj()) {
2068           y = x;
2069           break;
2070         }
2071       }
2072       if (y == NULL) {
2073         return NULL;
2074       }
2075       // the proj must feed a MergeMem
2076       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
2077         x = y->fast_out(i);
2078         if (x->is_MergeMem()) {
2079           mm = x->as_MergeMem();
2080           break;
2081         }
2082       }
2083       if (mm == NULL) {
2084         return NULL;
2085       }
2086       MemBarNode *mbar = NULL;
2087       // ensure the merge feeds a trailing membar cpuorder + acquire pair
2088       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2089         x = mm->fast_out(i);
2090         if (x->is_MemBar()) {
2091           int opcode = x->Opcode();
2092           if (opcode == Op_MemBarCPUOrder) {
2093             MemBarNode *z =  x->as_MemBar();
2094             z = child_membar(z);
2095             if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
2096               mbar = z;
2097             }
2098           }
2099           break;
2100         }
2101       }
2102       return mbar;
2103     } else {
2104       Node *y = NULL;
2105       // ensure the store feeds the first mergemem;
2106       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2107         if (st->fast_out(i) == mm) {
2108           y = st;
2109           break;
2110         }
2111       }
2112       if (y == NULL) {
2113         return NULL;
2114       }
2115       if (mm2 != NULL) {
2116         // ensure the store feeds the second mergemem;
2117         y = NULL;
2118         for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2119           if (st->fast_out(i) == mm2) {
2120             y = st;
2121           }
2122         }
2123         if (y == NULL) {
2124           return NULL;
2125         }
2126       }
2127 
2128       MemBarNode *mbar = NULL;
2129       // ensure the first mergemem feeds a volatile membar
2130       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2131         x = mm->fast_out(i);
2132         if (x->is_MemBar()) {
2133           int opcode = x->Opcode();
2134           if (opcode == Op_MemBarVolatile) {
2135             mbar = x->as_MemBar();
2136           }
2137           break;
2138         }
2139       }
2140       if (mm2 == NULL) {
2141         // this is our only option for a trailing membar
2142         return mbar;
2143       }
2144       // ensure the second mergemem feeds a volatile membar
2145       MemBarNode *mbar2 = NULL;
2146       for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
2147         x = mm2->fast_out(i);
2148         if (x->is_MemBar()) {
2149           int opcode = x->Opcode();
2150           if (opcode == Op_MemBarVolatile) {
2151             mbar2 = x->as_MemBar();
2152           }
2153           break;
2154         }
2155       }
2156       // if we have two merge mems we must have two volatile membars
2157       if (mbar == NULL || mbar2 == NULL) {
2158         return NULL;
2159       }
2160       // return the trailing membar
2161       if (is_card_mark_membar(mbar2)) {
2162         return mbar;
2163       } else {
2164         if (is_card_mark_membar(mbar)) {
2165           return mbar2;
2166         } else {
2167           return NULL;
2168         }
2169       }
2170     }
2171   }
2172 
2173   // trailing_to_leading
2174   //
2175   // graph traversal helper which detects the normal case Mem feed
2176   // from a trailing membar to a preceding release membar (optionally
2177   // its cpuorder child) i.e. it ensures that one or other of the
2178   // following Mem flow subgraphs is present.
2179   //
2180   //   MemBarRelease {leading}
2181   //   MemBarCPUOrder {optional}
2182   //    | Bot |  \      . . .
2183   //    |     |  StoreN/P[mo_release]  . . .
2184   //    |     |   /
2185   //    |    MergeMem
2186   //    |     |
2187   //   MemBarVolatile {not card mark}
2188   //
2189   //   MemBarRelease {leading}
2190   //   MemBarCPUOrder {optional}
2191   //      |       \      . . .
2192   //      |     CompareAndSwapX  . . .
2193   //               |
2194   //     . . .    SCMemProj
2195   //           \   |
2196   //      |    MergeMem
2197   //      |       |
2198   //    MemBarCPUOrder
2199   //    MemBarAcquire {trailing}
2200   //
2201   // this predicate checks for the same flow as the previous predicate
2202   // but starting from the bottom rather than the top.
2203   //
  // if the configuration is present returns the cpuorder membar for
  // preference or, when absent, the release membar, otherwise NULL.
2206   //
2207   // n.b. the input membar is expected to be a MemBarVolatile or
2208   // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
2209   // mark membar.
2210 
2211   MemBarNode *trailing_to_leading(const MemBarNode *barrier)
2212   {
    // input must be a volatile or acquire membar
2214     assert((barrier->Opcode() == Op_MemBarVolatile ||
2215             barrier->Opcode() == Op_MemBarAcquire),
2216            "expecting a volatile or an acquire membar");
2217 
2218     assert((barrier->Opcode() != Op_MemBarVolatile) ||
2219            !is_card_mark_membar(barrier),
2220            "not expecting a card mark membar");
2221     Node *x;
2222     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2223 
2224     // if we have an acquire membar then it must be fed via a CPUOrder
2225     // membar
2226 
2227     if (is_cas) {
2228       // skip to parent barrier which must be a cpuorder
2229       x = parent_membar(barrier);
      if (x == NULL || x->Opcode() != Op_MemBarCPUOrder)
2231         return NULL;
2232     } else {
2233       // start from the supplied barrier
2234       x = (Node *)barrier;
2235     }
2236 
2237     // the Mem feed to the membar should be a merge
    x = x->in(TypeFunc::Memory);
2239     if (!x->is_MergeMem())
2240       return NULL;
2241 
2242     MergeMemNode *mm = x->as_MergeMem();
2243 
2244     if (is_cas) {
2245       // the merge should be fed from the CAS via an SCMemProj node
2246       x = NULL;
2247       for (uint idx = 1; idx < mm->req(); idx++) {
2248         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2249           x = mm->in(idx);
2250           break;
2251         }
2252       }
2253       if (x == NULL) {
2254         return NULL;
2255       }
2256       // check for a CAS feeding this proj
2257       x = x->in(0);
2258       int opcode = x->Opcode();
2259       if (!is_CAS(opcode)) {
2260         return NULL;
2261       }
2262       // the CAS should get its mem feed from the leading membar
2263       x = x->in(MemNode::Memory);
2264     } else {
2265       // the merge should get its Bottom mem feed from the leading membar
2266       x = mm->in(Compile::AliasIdxBot);
2267     }
2268 
    // ensure this is a non-control projection
2270     if (!x->is_Proj() || x->is_CFG()) {
2271       return NULL;
2272     }
2273     // if it is fed by a membar that's the one we want
2274     x = x->in(0);
2275 
2276     if (!x->is_MemBar()) {
2277       return NULL;
2278     }
2279 
2280     MemBarNode *leading = x->as_MemBar();
2281     // reject invalid candidates
2282     if (!leading_membar(leading)) {
2283       return NULL;
2284     }
2285 
2286     // ok, we have a leading membar, now for the sanity clauses
2287 
2288     // the leading membar must feed Mem to a releasing store or CAS
2289     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2290     StoreNode *st = NULL;
2291     LoadStoreNode *cas = NULL;
2292     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2293       x = mem->fast_out(i);
2294       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2295         // two stores or CASes is one too many
2296         if (st != NULL || cas != NULL) {
2297           return NULL;
2298         }
2299         st = x->as_Store();
2300       } else if (is_CAS(x->Opcode())) {
2301         if (st != NULL || cas != NULL) {
2302           return NULL;
2303         }
2304         cas = x->as_LoadStore();
2305       }
2306     }
2307 
    // we must have either a store or a cas
    if (st == NULL && cas == NULL) {
2310       return NULL;
2311     }
2312 
2313     if (st == NULL) {
2314       // nothing more to check
2315       return leading;
2316     } else {
2317       // we should not have a store if we started from an acquire
2318       if (is_cas) {
2319         return NULL;
2320       }
2321 
2322       // the store should feed the merge we used to get here
2323       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2324         if (st->fast_out(i) == mm) {
2325           return leading;
2326         }
2327       }
2328     }
2329 
2330     return NULL;
2331   }
2332 
2333   // card_mark_to_leading
2334   //
2335   // graph traversal helper which traverses from a card mark volatile
2336   // membar to a leading membar i.e. it ensures that the following Mem
2337   // flow subgraph is present.
2338   //
2339   //    MemBarRelease {leading}
2340   //   {MemBarCPUOrder} {optional}
2341   //         |   . . .
2342   //     Bot |   /
2343   //      MergeMem
2344   //         |
2345   //     MemBarVolatile (card mark)
2346   //        |     \
2347   //      . . .   StoreCM
2348   //
  // if the configuration is present returns the cpuorder membar for
  // preference or, when absent, the release membar, otherwise NULL.
2351   //
  // n.b. the input membar is expected to be a MemBarVolatile and must
  // be a card mark membar.
2354 
2355   MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
2356   {
2357     // input must be a card mark volatile membar
2358     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2359 
2360     // the Mem feed to the membar should be a merge
2361     Node *x = barrier->in(TypeFunc::Memory);
2362     if (!x->is_MergeMem()) {
2363       return NULL;
2364     }
2365 
2366     MergeMemNode *mm = x->as_MergeMem();
2367 
2368     x = mm->in(Compile::AliasIdxBot);
2369 
2370     if (!x->is_MemBar()) {
2371       return NULL;
2372     }
2373 
2374     MemBarNode *leading = x->as_MemBar();
2375 
2376     if (leading_membar(leading)) {
2377       return leading;
2378     }
2379 
2380     return NULL;
2381   }
2382 
2383 bool unnecessary_acquire(const Node *barrier)
2384 {
2385   assert(barrier->is_MemBar(), "expecting a membar");
2386 
2387   if (UseBarriersForVolatile) {
2388     // we need to plant a dmb
2389     return false;
2390   }
2391 
  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
2398   //
2399   // a volatile load derived from an inlined unsafe field access
2400   // manifests as a cpuorder membar with Ctl and Mem projections
2401   // feeding both an acquire membar and a LoadX[mo_acquire]. The
2402   // acquire then feeds another cpuorder membar via Ctl and Mem
2403   // projections. The load has no output dependency on these trailing
2404   // membars because subsequent nodes inserted into the graph take
2405   // their control feed from the final membar cpuorder meaning they
2406   // are all ordered after the load.
2407 
2408   Node *x = barrier->lookup(TypeFunc::Parms);
2409   if (x) {
2410     // we are starting from an acquire and it has a fake dependency
2411     //
2412     // need to check for
2413     //
2414     //   LoadX[mo_acquire]
2415     //   {  |1   }
2416     //   {DecodeN}
2417     //      |Parms
2418     //   MemBarAcquire*
2419     //
2420     // where * tags node we were passed
2421     // and |k means input k
2422     if (x->is_DecodeNarrowPtr()) {
2423       x = x->in(1);
2424     }
2425 
2426     return (x->is_Load() && x->as_Load()->is_acquire());
2427   }
2428 
2429   // now check for an unsafe volatile get
2430 
2431   // need to check for
2432   //
2433   //   MemBarCPUOrder
2434   //        ||       \\
2435   //   MemBarAcquire* LoadX[mo_acquire]
2436   //        ||
2437   //   MemBarCPUOrder
2438   //
2439   // where * tags node we were passed
2440   // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes
2441 
2442   // check for a parent MemBarCPUOrder
2443   ProjNode *ctl;
2444   ProjNode *mem;
2445   MemBarNode *parent = parent_membar(barrier);
2446   if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
2447     return false;
2448   ctl = parent->proj_out(TypeFunc::Control);
2449   mem = parent->proj_out(TypeFunc::Memory);
2450   if (!ctl || !mem) {
2451     return false;
2452   }
2453   // ensure the proj nodes both feed a LoadX[mo_acquire]
2454   LoadNode *ld = NULL;
2455   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
2456     x = ctl->fast_out(i);
2457     // if we see a load we keep hold of it and stop searching
2458     if (x->is_Load()) {
2459       ld = x->as_Load();
2460       break;
2461     }
2462   }
2463   // it must be an acquiring load
2464   if (ld && ld->is_acquire()) {
2465 
2466     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2467       x = mem->fast_out(i);
2468       // if we see the same load we drop it and stop searching
2469       if (x == ld) {
2470         ld = NULL;
2471         break;
2472       }
2473     }
2474     // we must have dropped the load
2475     if (ld == NULL) {
2476       // check for a child cpuorder membar
2477       MemBarNode *child  = child_membar(barrier->as_MemBar());
2478       if (child && child->Opcode() == Op_MemBarCPUOrder)
2479         return true;
2480     }
2481   }
2482 
  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS
2485 
2486   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2487 
2488   return leading != NULL;
2489 }
2490 
2491 bool needs_acquiring_load(const Node *n)
2492 {
2493   assert(n->is_Load(), "expecting a load");
2494   if (UseBarriersForVolatile) {
2495     // we use a normal load and a dmb
2496     return false;
2497   }
2498 
2499   LoadNode *ld = n->as_Load();
2500 
2501   if (!ld->is_acquire()) {
2502     return false;
2503   }
2504 
2505   // check if this load is feeding an acquire membar
2506   //
2507   //   LoadX[mo_acquire]
2508   //   {  |1   }
2509   //   {DecodeN}
2510   //      |Parms
2511   //   MemBarAcquire*
2512   //
2513   // where * tags node we were passed
2514   // and |k means input k
2515 
2516   Node *start = ld;
2517   Node *mbacq = NULL;
2518 
2519   // if we hit a DecodeNarrowPtr we reset the start node and restart
2520   // the search through the outputs
2521  restart:
2522 
2523   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2524     Node *x = start->fast_out(i);
2525     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2526       mbacq = x;
2527     } else if (!mbacq &&
2528                (x->is_DecodeNarrowPtr() ||
2529                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2530       start = x;
2531       goto restart;
2532     }
2533   }
2534 
2535   if (mbacq) {
2536     return true;
2537   }
2538 
2539   // now check for an unsafe volatile get
2540 
2541   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2542   //
2543   //     MemBarCPUOrder
2544   //        ||       \\
2545   //   MemBarAcquire* LoadX[mo_acquire]
2546   //        ||
2547   //   MemBarCPUOrder
2548 
2549   MemBarNode *membar;
2550 
2551   membar = parent_membar(ld);
2552 
  if (!membar || membar->Opcode() != Op_MemBarCPUOrder) {
2554     return false;
2555   }
2556 
2557   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2558 
2559   membar = child_membar(membar);
2560 
  if (!membar || membar->Opcode() != Op_MemBarAcquire) {
2562     return false;
2563   }
2564 
2565   membar = child_membar(membar);
2566 
  if (!membar || membar->Opcode() != Op_MemBarCPUOrder) {
2568     return false;
2569   }
2570 
2571   return true;
2572 }
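
// n.b. (illustrative, not a definitive list of rules) the load rules
// later in this file consult this predicate in their match guards,
// along the lines of
//
//   predicate(!needs_acquiring_load(n));  // plain ldr suffices
//   predicate(needs_acquiring_load(n));   // must emit ldar instead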
2573 
2574 bool unnecessary_release(const Node *n)
2575 {
2576   assert((n->is_MemBar() &&
2577           n->Opcode() == Op_MemBarRelease),
2578          "expecting a release membar");
2579 
2580   if (UseBarriersForVolatile) {
2581     // we need to plant a dmb
2582     return false;
2583   }
2584 
2585   // if there is a dependent CPUOrder barrier then use that as the
2586   // leading
2587 
2588   MemBarNode *barrier = n->as_MemBar();
2589   // check for an intervening cpuorder membar
2590   MemBarNode *b = child_membar(barrier);
2591   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2592     // ok, so start the check from the dependent cpuorder barrier
2593     barrier = b;
2594   }
2595 
2596   // must start with a normal feed
2597   MemBarNode *trailing = leading_to_trailing(barrier);
2598 
2599   return (trailing != NULL);
2600 }
2601 
2602 bool unnecessary_volatile(const Node *n)
2603 {
2604   // assert n->is_MemBar();
2605   if (UseBarriersForVolatile) {
2606     // we need to plant a dmb
2607     return false;
2608   }
2609 
2610   MemBarNode *mbvol = n->as_MemBar();
2611 
2612   // first we check if this is part of a card mark. if so then we have
2613   // to generate a StoreLoad barrier
2614 
  if (is_card_mark_membar(mbvol)) {
    return false;
  }
2618 
2619   // ok, if it's not a card mark then we still need to check if it is
2620   // a trailing membar of a volatile put graph.
2621 
2622   return (trailing_to_leading(mbvol) != NULL);
2623 }
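
// n.b. (illustrative) these membar predicates are consumed by the
// MemBar rules below, which encode a membar as empty when it is
// redundant, e.g. a guard along the lines of
//
//   predicate(unnecessary_volatile(n));
//
// selects an empty encoding because the stlr used for the preceding
// volatile store already provides the required ordering.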
2624 
2625 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2626 
2627 bool needs_releasing_store(const Node *n)
2628 {
2629   // assert n->is_Store();
2630   if (UseBarriersForVolatile) {
2631     // we use a normal store and dmb combination
2632     return false;
2633   }
2634 
2635   StoreNode *st = n->as_Store();
2636 
2637   // the store must be marked as releasing
2638   if (!st->is_release()) {
2639     return false;
2640   }
2641 
2642   // the store must be fed by a membar
2643 
2644   Node *x = st->lookup(StoreNode::Memory);
2645 
  if (!x || !x->is_Proj()) {
2647     return false;
2648   }
2649 
2650   ProjNode *proj = x->as_Proj();
2651 
2652   x = proj->lookup(0);
2653 
2654   if (!x || !x->is_MemBar()) {
2655     return false;
2656   }
2657 
2658   MemBarNode *barrier = x->as_MemBar();
2659 
  // if the barrier is a release membar or a cpuorder membar fed by a
  // release membar then we need to check whether that forms part of a
  // volatile put graph.
2663 
2664   // reject invalid candidates
2665   if (!leading_membar(barrier)) {
2666     return false;
2667   }
2668 
2669   // does this lead a normal subgraph?
2670   MemBarNode *trailing = leading_to_trailing(barrier);
2671 
2672   return (trailing != NULL);
2673 }
2674 
2675 // predicate controlling translation of CAS
2676 //
2677 // returns true if CAS needs to use an acquiring load otherwise false
2678 
2679 bool needs_acquiring_load_exclusive(const Node *n)
2680 {
2681   assert(is_CAS(n->Opcode()), "expecting a compare and swap");
2682   if (UseBarriersForVolatile) {
2683     return false;
2684   }
2685 
2686   // CAS nodes only ought to turn up in inlined unsafe CAS operations
2687 #ifdef ASSERT
2688   LoadStoreNode *st = n->as_LoadStore();
2689 
2690   // the store must be fed by a membar
2691 
2692   Node *x = st->lookup(StoreNode::Memory);
2693 
2694   assert (x && x->is_Proj(), "CAS not fed by memory proj!");
2695 
2696   ProjNode *proj = x->as_Proj();
2697 
2698   x = proj->lookup(0);
2699 
2700   assert (x && x->is_MemBar(), "CAS not fed by membar!");
2701 
2702   MemBarNode *barrier = x->as_MemBar();
2703 
  // the barrier must be a cpuorder membar fed by a release membar
2705 
2706   assert(barrier->Opcode() == Op_MemBarCPUOrder,
2707          "CAS not fed by cpuorder membar!");
2708 
2709   MemBarNode *b = parent_membar(barrier);
2710   assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
2711           "CAS not fed by cpuorder+release membar pair!");
2712 
2713   // does this lead a normal subgraph?
2714   MemBarNode *mbar = leading_to_trailing(barrier);
2715 
2716   assert(mbar != NULL, "CAS not embedded in normal graph!");
2717 
2718   assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
2719 #endif // ASSERT
2720   // so we can just return true here
2721   return true;
2722 }
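
// n.b. (illustrative) the CAS rules below are expected to use this
// predicate to select an acquiring encoding, e.g. a guard such as
//
//   predicate(needs_acquiring_load_exclusive(n));
//
// picks a form whose exclusive load is ldaxr rather than ldxr.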
2723 
2724 // predicate controlling translation of StoreCM
2725 //
// returns true if the StoreStore barrier may be omitted before the
// card write, otherwise false
2728 
2729 bool unnecessary_storestore(const Node *storecm)
2730 {
2731   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2732 
  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking. Any other occurrence will happen when
  // performing a card mark using CMS with conditional card marking or
  // G1. In those cases the preceding MemBarVolatile will be
  // translated to a dmb ish which guarantees visibility of the
  // preceding StoreN/P before this StoreCM
2740 
2741   if (!UseConcMarkSweepGC || UseCondCardMark) {
2742     return true;
2743   }
2744 
2745   // if we are implementing volatile puts using barriers then we must
2746   // insert the dmb ishst
2747 
2748   if (UseBarriersForVolatile) {
2749     return false;
2750   }
2751 
  // we must be using CMS without conditional card marking so we have
  // to generate the StoreStore
2754 
2755   return false;
2756 }
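
// n.b. (illustrative) the StoreCM rules below use this predicate to
// choose between a plain card byte store and an ordered one, roughly
//
//   predicate(unnecessary_storestore(n));  // plain store suffices
//
// with the alternative rule planting the dmb ishst ahead of the
// store.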
2757 
2758 
2759 #define __ _masm.
2760 
// forward declarations for helper functions to convert register
// indices to register objects
2763 
2764 // the ad file has to provide implementations of certain methods
2765 // expected by the generic code
2766 //
2767 // REQUIRED FUNCTIONALITY
2768 
2769 //=============================================================================
2770 
2771 // !!!!! Special hack to get all types of calls to specify the byte offset
2772 //       from the start of the call to the point where the return address
2773 //       will point.
2774 
2775 int MachCallStaticJavaNode::ret_addr_offset()
2776 {
2777   // call should be a simple bl
2778   int off = 4;
2779   return off;
2780 }
2781 
2782 int MachCallDynamicJavaNode::ret_addr_offset()
2783 {
2784   return 16; // movz, movk, movk, bl
2785 }
2786 
2787 int MachCallRuntimeNode::ret_addr_offset() {
2788   // for generated stubs the call will be
2789   //   far_call(addr)
2790   // for real runtime callouts it will be six instructions
2791   // see aarch64_enc_java_to_runtime
2792   //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr))
2794   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2795   //   blrt rscratch1
2796   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2797   if (cb) {
2798     return MacroAssembler::far_branch_size();
2799   } else {
2800     return 6 * NativeInstruction::instruction_size;
2801   }
2802 }
2803 
2804 // Indicate if the safepoint node needs the polling page as an input
2805 
// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.
2812 
2813 bool SafePointNode::needs_polling_address_input()
2814 {
2815   return true;
2816 }
2817 
2818 //=============================================================================
2819 
2820 #ifndef PRODUCT
2821 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2822   st->print("BREAKPOINT");
2823 }
2824 #endif
2825 
2826 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2827   MacroAssembler _masm(&cbuf);
2828   __ brk(0);
2829 }
2830 
2831 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
2832   return MachNode::size(ra_);
2833 }
2834 
2835 //=============================================================================
2836 
2837 #ifndef PRODUCT
2838   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2839     st->print("nop \t# %d bytes pad for loops and calls", _count);
2840   }
2841 #endif
2842 
2843   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2844     MacroAssembler _masm(&cbuf);
2845     for (int i = 0; i < _count; i++) {
2846       __ nop();
2847     }
2848   }
2849 
2850   uint MachNopNode::size(PhaseRegAlloc*) const {
2851     return _count * NativeInstruction::instruction_size;
2852   }
2853 
2854 //=============================================================================
2855 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2856 
2857 int Compile::ConstantTable::calculate_table_base_offset() const {
2858   return 0;  // absolute addressing, no offset
2859 }
2860 
2861 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
2862 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
2863   ShouldNotReachHere();
2864 }
2865 
2866 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
2867   // Empty encoding
2868 }
2869 
2870 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
2871   return 0;
2872 }
2873 
2874 #ifndef PRODUCT
2875 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
2876   st->print("-- \t// MachConstantBaseNode (empty encoding)");
2877 }
2878 #endif
2879 
2880 #ifndef PRODUCT
2881 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2882   Compile* C = ra_->C;
2883 
2884   int framesize = C->frame_slots() << LogBytesPerInt;
2885 
2886   if (C->need_stack_bang(framesize))
2887     st->print("# stack bang size=%d\n\t", framesize);
2888 
2889   if (framesize < ((1 << 9) + 2 * wordSize)) {
2890     st->print("sub  sp, sp, #%d\n\t", framesize);
2891     st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
2892     if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
2893   } else {
2894     st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
2895     if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
2896     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
2897     st->print("sub  sp, sp, rscratch1");
2898   }
2899 }
2900 #endif
2901 
2902 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2903   Compile* C = ra_->C;
2904   MacroAssembler _masm(&cbuf);
2905 
2906   // n.b. frame size includes space for return pc and rfp
2907   const long framesize = C->frame_size_in_bytes();
2908   assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
2909 
2910   // insert a nop at the start of the prolog so we can patch in a
2911   // branch if we need to invalidate the method later
2912   __ nop();
2913 
2914   int bangsize = C->bang_size_in_bytes();
2915   if (C->need_stack_bang(bangsize) && UseStackBanging)
2916     __ generate_stack_overflow_check(bangsize);
2917 
2918   __ build_frame(framesize);
2919 
2920   if (NotifySimulator) {
2921     __ notify(Assembler::method_entry);
2922   }
2923 
2924   if (VerifyStackAtCalls) {
2925     Unimplemented();
2926   }
2927 
2928   C->set_frame_complete(cbuf.insts_size());
2929 
2930   if (C->has_mach_constant_base_node()) {
2931     // NOTE: We set the table base offset here because users might be
2932     // emitted before MachConstantBaseNode.
2933     Compile::ConstantTable& constant_table = C->constant_table();
2934     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
2935   }
2936 }
2937 
2938 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
2939 {
2940   return MachNode::size(ra_); // too many variables; just compute it
2941                               // the hard way
2942 }
2943 
2944 int MachPrologNode::reloc() const
2945 {
2946   return 0;
2947 }
2948 
2949 //=============================================================================
2950 
2951 #ifndef PRODUCT
2952 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2953   Compile* C = ra_->C;
2954   int framesize = C->frame_slots() << LogBytesPerInt;
2955 
2956   st->print("# pop frame %d\n\t",framesize);
2957 
2958   if (framesize == 0) {
2959     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
2960   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
2961     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
2962     st->print("add  sp, sp, #%d\n\t", framesize);
2963   } else {
2964     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
2965     st->print("add  sp, sp, rscratch1\n\t");
2966     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
2967   }
2968 
2969   if (do_polling() && C->is_method_compilation()) {
2970     st->print("# touch polling page\n\t");
2971     st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
2972     st->print("ldr zr, [rscratch1]");
2973   }
2974 }
2975 #endif
2976 
2977 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2978   Compile* C = ra_->C;
2979   MacroAssembler _masm(&cbuf);
2980   int framesize = C->frame_slots() << LogBytesPerInt;
2981 
2982   __ remove_frame(framesize);
2983 
2984   if (NotifySimulator) {
2985     __ notify(Assembler::method_reentry);
2986   }
2987 
2988   if (do_polling() && C->is_method_compilation()) {
2989     __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
2990   }
2991 }
2992 
2993 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
2994   // Variable size. Determine dynamically.
2995   return MachNode::size(ra_);
2996 }
2997 
2998 int MachEpilogNode::reloc() const {
2999   // Return number of relocatable values contained in this instruction.
3000   return 1; // 1 for polling page.
3001 }
3002 
3003 const Pipeline * MachEpilogNode::pipeline() const {
3004   return MachNode::pipeline_class();
3005 }
3006 
3007 // This method seems to be obsolete. It is declared in machnode.hpp
3008 // and defined in all *.ad files, but it is never called. Should we
3009 // get rid of it?
3010 int MachEpilogNode::safepoint_offset() const {
3011   assert(do_polling(), "no return for this epilog node");
3012   return 4;
3013 }
3014 
3015 //=============================================================================
3016 
3017 // Figure out which register class each belongs in: rc_int, rc_float or
3018 // rc_stack.
3019 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3020 
3021 static enum RC rc_class(OptoReg::Name reg) {
3022 
3023   if (reg == OptoReg::Bad) {
3024     return rc_bad;
3025   }
3026 
3027   // we have 30 int registers * 2 halves
3028   // (rscratch1 and rscratch2 are omitted)
3029 
3030   if (reg < 60) {
3031     return rc_int;
3032   }
3033 
  // we have 32 float registers * 2 halves
3035   if (reg < 60 + 128) {
3036     return rc_float;
3037   }
3038 
3039   // Between float regs & stack is the flags regs.
3040   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3041 
3042   return rc_stack;
3043 }
3044 
3045 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3046   Compile* C = ra_->C;
3047 
3048   // Get registers to move.
3049   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3050   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3051   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3052   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3053 
3054   enum RC src_hi_rc = rc_class(src_hi);
3055   enum RC src_lo_rc = rc_class(src_lo);
3056   enum RC dst_hi_rc = rc_class(dst_hi);
3057   enum RC dst_lo_rc = rc_class(dst_lo);
3058 
3059   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3060 
3061   if (src_hi != OptoReg::Bad) {
3062     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3063            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3064            "expected aligned-adjacent pairs");
3065   }
3066 
3067   if (src_lo == dst_lo && src_hi == dst_hi) {
3068     return 0;            // Self copy, no move.
3069   }
3070 
3071   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3072               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3073   int src_offset = ra_->reg2offset(src_lo);
3074   int dst_offset = ra_->reg2offset(dst_lo);
3075 
3076   if (bottom_type()->isa_vect() != NULL) {
3077     uint ireg = ideal_reg();
3078     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3079     if (cbuf) {
3080       MacroAssembler _masm(cbuf);
3081       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3082       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3083         // stack->stack
3084         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3085         if (ireg == Op_VecD) {
3086           __ unspill(rscratch1, true, src_offset);
3087           __ spill(rscratch1, true, dst_offset);
3088         } else {
3089           __ spill_copy128(src_offset, dst_offset);
3090         }
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3092         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3093                ireg == Op_VecD ? __ T8B : __ T16B,
3094                as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 ireg == Op_VecD ? __ D : __ Q,
                 ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   ireg == Op_VecD ? __ D : __ Q,
                   ra_->reg2offset(src_lo));
3103       } else {
3104         ShouldNotReachHere();
3105       }
3106     }
3107   } else if (cbuf) {
3108     MacroAssembler _masm(cbuf);
3109     switch (src_lo_rc) {
3110     case rc_int:
3111       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3112         if (is64) {
3113             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3114                    as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
3120       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3121         if (is64) {
3122             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3123                      as_Register(Matcher::_regEncode[src_lo]));
3124         } else {
3125             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3126                      as_Register(Matcher::_regEncode[src_lo]));
3127         }
3128       } else {                    // gpr --> stack spill
3129         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3130         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3131       }
3132       break;
3133     case rc_float:
3134       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3135         if (is64) {
3136             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3137                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3138         } else {
3139             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3140                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3141         }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
3150       } else {                    // fpr --> stack spill
3151         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3152         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3153                  is64 ? __ D : __ S, dst_offset);
3154       }
3155       break;
3156     case rc_stack:
3157       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3158         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3159       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3160         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3161                    is64 ? __ D : __ S, src_offset);
3162       } else {                    // stack --> stack copy
3163         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3164         __ unspill(rscratch1, is64, src_offset);
3165         __ spill(rscratch1, is64, dst_offset);
3166       }
3167       break;
3168     default:
3169       assert(false, "bad rc_class for spill");
3170       ShouldNotReachHere();
3171     }
3172   }
3173 
3174   if (st) {
3175     st->print("spill ");
3176     if (src_lo_rc == rc_stack) {
3177       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3178     } else {
3179       st->print("%s -> ", Matcher::regName[src_lo]);
3180     }
3181     if (dst_lo_rc == rc_stack) {
3182       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3183     } else {
3184       st->print("%s", Matcher::regName[dst_lo]);
3185     }
3186     if (bottom_type()->isa_vect() != NULL) {
3187       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3188     } else {
3189       st->print("\t# spill size = %d", is64 ? 64:32);
3190     }
3191   }
3192 
  return 0;
}
3196 
3197 #ifndef PRODUCT
3198 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3199   if (!ra_)
3200     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3201   else
3202     implementation(NULL, ra_, false, st);
3203 }
3204 #endif
3205 
3206 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3207   implementation(&cbuf, ra_, false, NULL);
3208 }
3209 
3210 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
3211   return MachNode::size(ra_);
3212 }
3213 
3214 //=============================================================================
3215 
3216 #ifndef PRODUCT
3217 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3218   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3219   int reg = ra_->get_reg_first(this);
3220   st->print("add %s, rsp, #%d]\t# box lock",
3221             Matcher::regName[reg], offset);
3222 }
3223 #endif
3224 
3225 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3226   MacroAssembler _masm(&cbuf);
3227 
3228   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3229   int reg    = ra_->get_encode(this);
3230 
3231   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3232     __ add(as_Register(reg), sp, offset);
3233   } else {
3234     ShouldNotReachHere();
3235   }
3236 }
3237 
3238 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
3239   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
3240   return 4;
3241 }
3242 
3243 //=============================================================================
3244 
3245 #ifndef PRODUCT
3246 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3247 {
3248   st->print_cr("# MachUEPNode");
3249   if (UseCompressedClassPointers) {
3250     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3251     if (Universe::narrow_klass_shift() != 0) {
3252       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3253     }
3254   } else {
3255    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3256   }
3257   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3258   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3259 }
3260 #endif
3261 
3262 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
3263 {
3264   // This is the unverified entry point.
3265   MacroAssembler _masm(&cbuf);
3266 
3267   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
3268   Label skip;
3269   // TODO
3270   // can we avoid this skip and still use a reloc?
3271   __ br(Assembler::EQ, skip);
3272   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
3273   __ bind(skip);
3274 }
3275 
3276 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
3277 {
3278   return MachNode::size(ra_);
3279 }
3280 
3281 // REQUIRED EMIT CODE
3282 
3283 //=============================================================================
3284 
3285 // Emit exception handler code.
3286 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3287 {
3288   // mov rscratch1 #exception_blob_entry_point
3289   // br rscratch1
3290   // Note that the code buffer's insts_mark is always relative to insts.
3291   // That's why we must use the macroassembler to generate a handler.
3292   MacroAssembler _masm(&cbuf);
3293   address base = __ start_a_stub(size_exception_handler());
3294   if (base == NULL) {
3295     ciEnv::current()->record_failure("CodeCache is full");
3296     return 0;  // CodeBuffer::expand failed
3297   }
3298   int offset = __ offset();
3299   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3300   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3301   __ end_a_stub();
3302   return offset;
3303 }
3304 
3305 // Emit deopt handler code.
3306 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
3307 {
3308   // Note that the code buffer's insts_mark is always relative to insts.
3309   // That's why we must use the macroassembler to generate a handler.
3310   MacroAssembler _masm(&cbuf);
3311   address base = __ start_a_stub(size_deopt_handler());
3312   if (base == NULL) {
3313     ciEnv::current()->record_failure("CodeCache is full");
3314     return 0;  // CodeBuffer::expand failed
3315   }
3316   int offset = __ offset();
3317 
3318   __ adr(lr, __ pc());
3319   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
3320 
3321   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
3322   __ end_a_stub();
3323   return offset;
3324 }
3325 
3326 // REQUIRED MATCHER CODE
3327 
3328 //=============================================================================
3329 
3330 const bool Matcher::match_rule_supported(int opcode) {
3331 
3332   switch (opcode) {
3333   case Op_StrComp:
3334   case Op_StrIndexOf:
3335     if (CompactStrings)  return false;
3336     break;
3337   default:
3338     break;
3339   }
3340 
3341   if (!has_match_rule(opcode)) {
3342     return false;
3343   }
3344 
  return true;  // By default, match rules are supported.
3346 }
3347 
3348 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3349 
3350   // TODO
3351   // identify extra cases that we might want to provide match rules for
  // e.g. vector Op_ nodes and other intrinsics, guarded by vlen
3353   bool ret_value = match_rule_supported(opcode);
3354   // Add rules here.
3355 
  return ret_value;  // By default, match rules are supported.
3357 }
3358 
3359 const bool Matcher::has_predicated_vectors(void) {
3360   return false;
3361 }
3362 
3363 const int Matcher::float_pressure(int default_pressure_threshold) {
3364   return default_pressure_threshold;
3365 }
3366 
3367 int Matcher::regnum_to_fpu_offset(int regnum)
3368 {
3369   Unimplemented();
3370   return 0;
3371 }
3372 
3373 // Is this branch offset short enough that a short branch can be used?
3374 //
3375 // NOTE: If the platform does not provide any short branch variants, then
3376 //       this method should return false for offset 0.
3377 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3378   // The passed offset is relative to address of the branch.
3379 
3380   return (-32768 <= offset && offset < 32768);
3381 }
3382 
3383 const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
3385   // Probably always true, even if a temp register is required.
3386   return true;
3387 }
3388 
3389 // true just means we have fast l2f conversion
3390 const bool Matcher::convL2FSupported(void) {
3391   return true;
3392 }
3393 
3394 // Vector width in bytes.
3395 const int Matcher::vector_width_in_bytes(BasicType bt) {
3396   int size = MIN2(16,(int)MaxVectorSize);
3397   // Minimum 2 values in vector
3398   if (size < 2*type2aelembytes(bt)) size = 0;
3399   // But never < 4
3400   if (size < 4) size = 0;
3401   return size;
3402 }
3403 
3404 // Limits on vector size (number of elements) loaded into vector.
3405 const int Matcher::max_vector_size(const BasicType bt) {
3406   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3407 }
const int Matcher::min_vector_size(const BasicType bt) {
  // For the moment limit the vector size to 8 bytes
  int size = 8 / type2aelembytes(bt);
  if (size < 2) size = 2;
  return size;
}
3414 
3415 // Vector ideal reg.
3416 const int Matcher::vector_ideal_reg(int len) {
3417   switch(len) {
3418     case  8: return Op_VecD;
3419     case 16: return Op_VecX;
3420   }
3421   ShouldNotReachHere();
3422   return 0;
3423 }
3424 
3425 const int Matcher::vector_shift_count_ideal_reg(int size) {
3426   return Op_VecX;
3427 }
3428 
3429 // AES support not yet implemented
3430 const bool Matcher::pass_original_key_for_aes() {
3431   return false;
3432 }
3433 
// AArch64 supports misaligned vector store/load.
3435 const bool Matcher::misaligned_vectors_ok() {
3436   return !AlignVector; // can be changed by flag
3437 }
3438 
3439 // false => size gets scaled to BytesPerLong, ok.
3440 const bool Matcher::init_array_count_is_in_bytes = false;
3441 
3442 // Use conditional move (CMOVL)
3443 const int Matcher::long_cmove_cost() {
3444   // long cmoves are no more expensive than int cmoves
3445   return 0;
3446 }
3447 
3448 const int Matcher::float_cmove_cost() {
3449   // float cmoves are no more expensive than int cmoves
3450   return 0;
3451 }
3452 
3453 // Does the CPU require late expand (see block.cpp for description of late expand)?
3454 const bool Matcher::require_postalloc_expand = false;
3455 
3456 // Do we need to mask the count passed to shift instructions or does
3457 // the cpu only look at the lower 5/6 bits anyway?
3458 const bool Matcher::need_masked_shift_count = false;
3459 
3460 // This affects two different things:
3461 //  - how Decode nodes are matched
3462 //  - how ImplicitNullCheck opportunities are recognized
3463 // If true, the matcher will try to remove all Decodes and match them
3464 // (as operands) into nodes. NullChecks are not prepared to deal with
3465 // Decodes by final_graph_reshaping().
3466 // If false, final_graph_reshaping() forces the decode behind the Cmp
3467 // for a NullCheck. The matcher matches the Decode node into a register.
3468 // Implicit_null_check optimization moves the Decode along with the
3469 // memory operation back up before the NullCheck.
3470 bool Matcher::narrow_oop_use_complex_address() {
3471   return Universe::narrow_oop_shift() == 0;
3472 }
3473 
3474 bool Matcher::narrow_klass_use_complex_address() {
3475 // TODO
3476 // decide whether we need to set this to true
3477   return false;
3478 }
3479 
3480 // Is it better to copy float constants, or load them directly from
3481 // memory?  Intel can load a float constant from a direct address,
3482 // requiring no extra registers.  Most RISCs will have to materialize
3483 // an address into a register first, so they would do better to copy
3484 // the constant from stack.
3485 const bool Matcher::rematerialize_float_constants = false;
3486 
3487 // If CPU can load and store mis-aligned doubles directly then no
3488 // fixup is needed.  Else we split the double into 2 integer pieces
3489 // and move it piece-by-piece.  Only happens when passing doubles into
3490 // C code as the Java calling convention forces doubles to be aligned.
3491 const bool Matcher::misaligned_doubles_ok = true;
3492 
// Never called on AArch64; this fixup is x86-specific.
3494 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
3495   Unimplemented();
3496 }
3497 
3498 // Advertise here if the CPU requires explicit rounding operations to
3499 // implement the UseStrictFP mode.
3500 const bool Matcher::strict_fp_requires_explicit_rounding = false;
3501 
3502 // Are floats converted to double when stored to stack during
3503 // deoptimization?
3504 bool Matcher::float_in_double() { return true; }
3505 
3506 // Do ints take an entire long register or just half?
3507 // The relevant question is how the int is callee-saved:
3508 // the whole long is written but de-opt'ing will have to extract
3509 // the relevant 32 bits.
3510 const bool Matcher::int_in_long = true;
3511 
3512 // Return whether or not this register is ever used as an argument.
3513 // This function is used on startup to build the trampoline stubs in
3514 // generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers will not be
3516 // available to the callee.
3517 bool Matcher::can_be_java_arg(int reg)
3518 {
3519   return
3520     reg ==  R0_num || reg == R0_H_num ||
3521     reg ==  R1_num || reg == R1_H_num ||
3522     reg ==  R2_num || reg == R2_H_num ||
3523     reg ==  R3_num || reg == R3_H_num ||
3524     reg ==  R4_num || reg == R4_H_num ||
3525     reg ==  R5_num || reg == R5_H_num ||
3526     reg ==  R6_num || reg == R6_H_num ||
3527     reg ==  R7_num || reg == R7_H_num ||
3528     reg ==  V0_num || reg == V0_H_num ||
3529     reg ==  V1_num || reg == V1_H_num ||
3530     reg ==  V2_num || reg == V2_H_num ||
3531     reg ==  V3_num || reg == V3_H_num ||
3532     reg ==  V4_num || reg == V4_H_num ||
3533     reg ==  V5_num || reg == V5_H_num ||
3534     reg ==  V6_num || reg == V6_H_num ||
3535     reg ==  V7_num || reg == V7_H_num;
3536 }
3537 
3538 bool Matcher::is_spillable_arg(int reg)
3539 {
3540   return can_be_java_arg(reg);
3541 }
3542 
3543 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
3544   return false;
3545 }
3546 
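// Register for DIVI projection of divmodI.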
3547 RegMask Matcher::divI_proj_mask() {
3548   ShouldNotReachHere();
3549   return RegMask();
3550 }
3551 
3552 // Register for MODI projection of divmodI.
3553 RegMask Matcher::modI_proj_mask() {
3554   ShouldNotReachHere();
3555   return RegMask();
3556 }
3557 
3558 // Register for DIVL projection of divmodL.
3559 RegMask Matcher::divL_proj_mask() {
3560   ShouldNotReachHere();
3561   return RegMask();
3562 }
3563 
3564 // Register for MODL projection of divmodL.
3565 RegMask Matcher::modL_proj_mask() {
3566   ShouldNotReachHere();
3567   return RegMask();
3568 }
3569 
3570 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
3571   return FP_REG_mask();
3572 }
3573 
3574 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3575   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3576     Node* u = addp->fast_out(i);
3577     if (u->is_Mem()) {
      int opsize = u->as_Mem()->memory_size();
      assert(opsize > 0, "unexpected memory operand size");
      if (opsize != (1 << shift)) {
        return false;
      }
3583     }
3584   }
3585   return true;
3586 }
3587 
3588 const bool Matcher::convi2l_type_required = false;
3589 
3590 // Should the Matcher clone shifts on addressing modes, expecting them
3591 // to be subsumed into complex addressing expressions or compute them
3592 // into registers?
3593 bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
3594   if (clone_base_plus_offset_address(m, mstack, address_visited)) {
3595     return true;
3596   }
3597 
3598   Node *off = m->in(AddPNode::Offset);
3599   if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
3600       size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
3601       // Are there other uses besides address expressions?
3602       !is_visited(off)) {
3603     address_visited.set(off->_idx); // Flag as address_visited
3604     mstack.push(off->in(2), Visit);
3605     Node *conv = off->in(1);
3606     if (conv->Opcode() == Op_ConvI2L &&
3607         // Are there other uses besides address expressions?
3608         !is_visited(conv)) {
3609       address_visited.set(conv->_idx); // Flag as address_visited
3610       mstack.push(conv->in(1), Pre_Visit);
3611     } else {
3612       mstack.push(conv, Pre_Visit);
3613     }
3614     address_visited.test_set(m->_idx); // Flag as address_visited
3615     mstack.push(m->in(AddPNode::Address), Pre_Visit);
3616     mstack.push(m->in(AddPNode::Base), Pre_Visit);
3617     return true;
3618   } else if (off->Opcode() == Op_ConvI2L &&
3619              // Are there other uses besides address expressions?
3620              !is_visited(off)) {
3621     address_visited.test_set(m->_idx); // Flag as address_visited
3622     address_visited.set(off->_idx); // Flag as address_visited
3623     mstack.push(off->in(1), Pre_Visit);
3624     mstack.push(m->in(AddPNode::Address), Pre_Visit);
3625     mstack.push(m->in(AddPNode::Base), Pre_Visit);
3626     return true;
3627   }
3628   return false;
3629 }
3630 
3631 // Transform:
3632 // (AddP base (AddP base address (LShiftL index con)) offset)
3633 // into:
3634 // (AddP base (AddP base offset) (LShiftL index con))
3635 // to take full advantage of ARM's addressing modes
3636 void Compile::reshape_address(AddPNode* addp) {
3637   Node *addr = addp->in(AddPNode::Address);
3638   if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) {
3639     const AddPNode *addp2 = addr->as_AddP();
3640     if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL &&
3641          addp2->in(AddPNode::Offset)->in(2)->is_Con() &&
3642          size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) ||
3643         addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) {
3644 
3645       // Any use that can't embed the address computation?
3646       for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3647         Node* u = addp->fast_out(i);
3648         if (!u->is_Mem() || u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) {
3649           return;
3650         }
3651       }
3652       
3653       Node* off = addp->in(AddPNode::Offset);
3654       Node* addr2 = addp2->in(AddPNode::Address);
3655       Node* base = addp->in(AddPNode::Base);
3656       
3657       Node* new_addr = NULL;
3658       // Check whether the graph already has the new AddP we need
3659       // before we create one (no GVN available here).
3660       for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) {
3661         Node* u = addr2->fast_out(i);
3662         if (u->is_AddP() &&
3663             u->in(AddPNode::Base) == base &&
3664             u->in(AddPNode::Address) == addr2 &&
3665             u->in(AddPNode::Offset) == off) {
3666           new_addr = u;
3667           break;
3668         }
3669       }
3670       
3671       if (new_addr == NULL) {
3672         new_addr = new AddPNode(base, addr2, off);
3673       }
3674       Node* new_off = addp2->in(AddPNode::Offset);
3675       addp->set_req(AddPNode::Address, new_addr);
3676       if (addr->outcnt() == 0) {
3677         addr->disconnect_inputs(NULL, this);
3678       }
3679       addp->set_req(AddPNode::Offset, new_off);
3680       if (off->outcnt() == 0) {
3681         off->disconnect_inputs(NULL, this);
3682       }
3683     }
3684   }
3685 }
3686 
// helper for encoding java_to_runtime calls on sim
//
// This is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction.  The TypeFunc
// can be queried to identify the counts of integral and floating
// arguments and the return type.
3693 
3694 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3695 {
3696   int gps = 0;
3697   int fps = 0;
3698   const TypeTuple *domain = tf->domain();
3699   int max = domain->cnt();
3700   for (int i = TypeFunc::Parms; i < max; i++) {
3701     const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // note: no break here, so FP args are counted in gps as well
    default:
      gps++;
    }
3709   }
3710   gpcnt = gps;
3711   fpcnt = fps;
3712   BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  }
3727 }
3728 
3729 #define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
3730   MacroAssembler _masm(&cbuf);                                          \
3731   {                                                                     \
3732     guarantee(INDEX == -1, "mode not permitted for volatile");          \
3733     guarantee(DISP == 0, "mode not permitted for volatile");            \
3734     guarantee(SCALE == 0, "mode not permitted for volatile");           \
3735     __ INSN(REG, as_Register(BASE));                                    \
3736   }
3737 
3738 typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
3739 typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
3740 typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
3741                                   MacroAssembler::SIMD_RegVariant T, const Address &adr);
3742 
3743   // Used for all non-volatile memory accesses.  The use of
3744   // $mem->opcode() to discover whether this pattern uses sign-extended
3745   // offsets is something of a kludge.
3746   static void loadStore(MacroAssembler masm, mem_insn insn,
3747                          Register reg, int opcode,
3748                          Register base, int index, int size, int disp)
3749   {
3750     Address::extend scale;
3751 
3752     // Hooboy, this is fugly.  We need a way to communicate to the
3753     // encoder that the index needs to be sign extended, so we have to
3754     // enumerate all the cases.
3755     switch (opcode) {
3756     case INDINDEXSCALEDI2L:
3757     case INDINDEXSCALEDI2LN:
3758     case INDINDEXI2L:
3759     case INDINDEXI2LN:
3760       scale = Address::sxtw(size);
3761       break;
3762     default:
3763       scale = Address::lsl(size);
3764     }
3765 
3766     if (index == -1) {
3767       (masm.*insn)(reg, Address(base, disp));
3768     } else {
3769       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3770       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3771     }
3772   }
3773 
3774   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3775                          FloatRegister reg, int opcode,
3776                          Register base, int index, int size, int disp)
3777   {
3778     Address::extend scale;
3779 
3780     switch (opcode) {
3781     case INDINDEXSCALEDI2L:
3782     case INDINDEXSCALEDI2LN:
3783       scale = Address::sxtw(size);
3784       break;
3785     default:
3786       scale = Address::lsl(size);
3787     }
3788 
    if (index == -1) {
3790       (masm.*insn)(reg, Address(base, disp));
3791     } else {
3792       assert(disp == 0, "unsupported address mode: disp = %d", disp);
3793       (masm.*insn)(reg, Address(base, as_Register(index), scale));
3794     }
3795   }
3796 
3797   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3798                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3799                          int opcode, Register base, int index, int size, int disp)
3800   {
3801     if (index == -1) {
3802       (masm.*insn)(reg, T, Address(base, disp));
3803     } else {
3804       assert(disp == 0, "unsupported address mode");
3805       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3806     }
3807   }
3808 
3809 %}
3810 
3811 
3812 
3813 //----------ENCODING BLOCK-----------------------------------------------------
3814 // This block specifies the encoding classes used by the compiler to
3815 // output byte streams.  Encoding classes are parameterized macros
3816 // used by Machine Instruction Nodes in order to generate the bit
3817 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
3821 // which returns its register number when queried.  CONST_INTER causes
3822 // an operand to generate a function which returns the value of the
3823 // constant when queried.  MEMORY_INTER causes an operand to generate
3824 // four functions which return the Base Register, the Index Register,
3825 // the Scale Value, and the Offset Value of the operand when queried.
3826 // COND_INTER causes an operand to generate six functions which return
// the encoding code (i.e. the encoding bits for the instruction)
3828 // associated with each basic boolean condition for a conditional
3829 // instruction.
3830 //
3831 // Instructions specify two basic values for encoding.  Again, a
3832 // function is available to check if the constant displacement is an
3833 // oop. They use the ins_encode keyword to specify their encoding
3834 // classes (which must be a sequence of enc_class names, and their
3835 // parameters, specified in the encoding block), and they use the
3836 // opcode keyword to specify, in order, their primary, secondary, and
3837 // tertiary opcode.  Only the opcode sections which a particular
3838 // instruction needs for encoding need to be specified.
3839 encode %{
3840   // Build emit functions for each basic byte or larger field in the
3841   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3842   // from C++ code in the enc_class source block.  Emit functions will
3843   // live in the main source block for now.  In future, we can
3844   // generalize this by adding a syntax that specifies the sizes of
3845   // fields in an order, so that the adlc can build the emit functions
3846   // automagically
3847 
3848   // catch all for unimplemented encodings
3849   enc_class enc_unimplemented %{
3850     MacroAssembler _masm(&cbuf);
3851     __ unimplemented("C2 catch all");
3852   %}
3853 
3854   // BEGIN Non-volatile memory access
3855 
3856   enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
3857     Register dst_reg = as_Register($dst$$reg);
3858     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
3859                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3860   %}
3861 
3862   enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
3863     Register dst_reg = as_Register($dst$$reg);
3864     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
3865                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3866   %}
3867 
3868   enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
3869     Register dst_reg = as_Register($dst$$reg);
3870     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
3871                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3872   %}
3873 
3874   enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
3875     Register dst_reg = as_Register($dst$$reg);
3876     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
3877                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3878   %}
3879 
3880   enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
3881     Register dst_reg = as_Register($dst$$reg);
3882     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
3883                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3884   %}
3885 
3886   enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
3887     Register dst_reg = as_Register($dst$$reg);
3888     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
3889                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3890   %}
3891 
3892   enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
3893     Register dst_reg = as_Register($dst$$reg);
3894     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
3895                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3896   %}
3897 
3898   enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
3899     Register dst_reg = as_Register($dst$$reg);
3900     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
3901                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3902   %}
3903 
3904   enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
3905     Register dst_reg = as_Register($dst$$reg);
3906     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
3907                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3908   %}
3909 
3910   enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
3911     Register dst_reg = as_Register($dst$$reg);
3912     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
3913                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3914   %}
3915 
3916   enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
3917     Register dst_reg = as_Register($dst$$reg);
3918     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
3919                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3920   %}
3921 
3922   enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
3923     Register dst_reg = as_Register($dst$$reg);
3924     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
3925                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3926   %}
3927 
3928   enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
3929     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3930     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
3931                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3932   %}
3933 
3934   enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
3935     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3936     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
3937                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3938   %}
3939 
3940   enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
3941     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3942     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
3943        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3944   %}
3945 
3946   enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
3947     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3948     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
3949        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3950   %}
3951 
3952   enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
3953     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3954     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
3955        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3956   %}
3957 
3958   enc_class aarch64_enc_strb(iRegI src, memory mem) %{
3959     Register src_reg = as_Register($src$$reg);
3960     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
3961                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3962   %}
3963 
3964   enc_class aarch64_enc_strb0(memory mem) %{
3965     MacroAssembler _masm(&cbuf);
3966     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
3967                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3968   %}
3969 
3970   enc_class aarch64_enc_strb0_ordered(memory mem) %{
3971     MacroAssembler _masm(&cbuf);
3972     __ membar(Assembler::StoreStore);
3973     loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
3974                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3975   %}
3976 
3977   enc_class aarch64_enc_strh(iRegI src, memory mem) %{
3978     Register src_reg = as_Register($src$$reg);
3979     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
3980                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3981   %}
3982 
3983   enc_class aarch64_enc_strh0(memory mem) %{
3984     MacroAssembler _masm(&cbuf);
3985     loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
3986                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3987   %}
3988 
3989   enc_class aarch64_enc_strw(iRegI src, memory mem) %{
3990     Register src_reg = as_Register($src$$reg);
3991     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
3992                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3993   %}
3994 
3995   enc_class aarch64_enc_strw0(memory mem) %{
3996     MacroAssembler _masm(&cbuf);
3997     loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
3998                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
3999   %}
4000 
4001   enc_class aarch64_enc_str(iRegL src, memory mem) %{
4002     Register src_reg = as_Register($src$$reg);
4003     // we sometimes get asked to store the stack pointer into the
4004     // current thread -- we cannot do that directly on AArch64
4005     if (src_reg == r31_sp) {
4006       MacroAssembler _masm(&cbuf);
4007       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
4008       __ mov(rscratch2, sp);
4009       src_reg = rscratch2;
4010     }
4011     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
4012                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4013   %}
4014 
4015   enc_class aarch64_enc_str0(memory mem) %{
4016     MacroAssembler _masm(&cbuf);
4017     loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
4018                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4019   %}
4020 
4021   enc_class aarch64_enc_strs(vRegF src, memory mem) %{
4022     FloatRegister src_reg = as_FloatRegister($src$$reg);
4023     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
4024                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4025   %}
4026 
4027   enc_class aarch64_enc_strd(vRegD src, memory mem) %{
4028     FloatRegister src_reg = as_FloatRegister($src$$reg);
4029     loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
4030                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4031   %}
4032 
4033   enc_class aarch64_enc_strvS(vecD src, memory mem) %{
4034     FloatRegister src_reg = as_FloatRegister($src$$reg);
4035     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
4036        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4037   %}
4038 
4039   enc_class aarch64_enc_strvD(vecD src, memory mem) %{
4040     FloatRegister src_reg = as_FloatRegister($src$$reg);
4041     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
4042        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4043   %}
4044 
4045   enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
4046     FloatRegister src_reg = as_FloatRegister($src$$reg);
4047     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
4048        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
4049   %}
4050 
4051   // END Non-volatile memory access
4052 
4053   // volatile loads and stores
4054 
4055   enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
4056     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4057                  rscratch1, stlrb);
4058   %}
4059 
4060   enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
4061     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4062                  rscratch1, stlrh);
4063   %}
4064 
4065   enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
4066     MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4067                  rscratch1, stlrw);
4068   %}
4069 
4070 
4071   enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
4072     Register dst_reg = as_Register($dst$$reg);
4073     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4074              rscratch1, ldarb);
4075     __ sxtbw(dst_reg, dst_reg);
4076   %}
4077 
4078   enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
4079     Register dst_reg = as_Register($dst$$reg);
4080     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4081              rscratch1, ldarb);
4082     __ sxtb(dst_reg, dst_reg);
4083   %}
4084 
4085   enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
4086     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4087              rscratch1, ldarb);
4088   %}
4089 
4090   enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
4091     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4092              rscratch1, ldarb);
4093   %}
4094 
4095   enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
4096     Register dst_reg = as_Register($dst$$reg);
4097     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4098              rscratch1, ldarh);
4099     __ sxthw(dst_reg, dst_reg);
4100   %}
4101 
4102   enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
4103     Register dst_reg = as_Register($dst$$reg);
4104     MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4105              rscratch1, ldarh);
4106     __ sxth(dst_reg, dst_reg);
4107   %}
4108 
4109   enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
4110     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4111              rscratch1, ldarh);
4112   %}
4113 
4114   enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
4115     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4116              rscratch1, ldarh);
4117   %}
4118 
4119   enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
4120     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4121              rscratch1, ldarw);
4122   %}
4123 
4124   enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
4125     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4126              rscratch1, ldarw);
4127   %}
4128 
4129   enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
4130     MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4131              rscratch1, ldar);
4132   %}
4133 
4134   enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
4135     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4136              rscratch1, ldarw);
4137     __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
4138   %}
4139 
4140   enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
4141     MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4142              rscratch1, ldar);
4143     __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
4144   %}
4145 
4146   enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
4147     Register src_reg = as_Register($src$$reg);
4148     // we sometimes get asked to store the stack pointer into the
4149     // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
4152       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
4153       __ mov(rscratch2, sp);
4154       src_reg = rscratch2;
4155     }
4156     MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4157                  rscratch1, stlr);
4158   %}
4159 
4160   enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
4161     {
4162       MacroAssembler _masm(&cbuf);
4163       FloatRegister src_reg = as_FloatRegister($src$$reg);
4164       __ fmovs(rscratch2, src_reg);
4165     }
4166     MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4167                  rscratch1, stlrw);
4168   %}
4169 
4170   enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
4171     {
4172       MacroAssembler _masm(&cbuf);
4173       FloatRegister src_reg = as_FloatRegister($src$$reg);
4174       __ fmovd(rscratch2, src_reg);
4175     }
4176     MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
4177                  rscratch1, stlr);
4178   %}
4179 
4180   // synchronized read/update encodings
4181 
4182   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
4183     MacroAssembler _masm(&cbuf);
4184     Register dst_reg = as_Register($dst$$reg);
4185     Register base = as_Register($mem$$base);
4186     int index = $mem$$index;
4187     int scale = $mem$$scale;
4188     int disp = $mem$$disp;
    if (index == -1) {
      if (disp != 0) {
4191         __ lea(rscratch1, Address(base, disp));
4192         __ ldaxr(dst_reg, rscratch1);
4193       } else {
4194         // TODO
4195         // should we ever get anything other than this case?
4196         __ ldaxr(dst_reg, base);
4197       }
4198     } else {
4199       Register index_reg = as_Register(index);
4200       if (disp == 0) {
4201         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
4202         __ ldaxr(dst_reg, rscratch1);
4203       } else {
4204         __ lea(rscratch1, Address(base, disp));
4205         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
4206         __ ldaxr(dst_reg, rscratch1);
4207       }
4208     }
4209   %}
4210 
4211   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
4212     MacroAssembler _masm(&cbuf);
4213     Register src_reg = as_Register($src$$reg);
4214     Register base = as_Register($mem$$base);
4215     int index = $mem$$index;
4216     int scale = $mem$$scale;
4217     int disp = $mem$$disp;
    if (index == -1) {
      if (disp != 0) {
4220         __ lea(rscratch2, Address(base, disp));
4221         __ stlxr(rscratch1, src_reg, rscratch2);
4222       } else {
4223         // TODO
4224         // should we ever get anything other than this case?
4225         __ stlxr(rscratch1, src_reg, base);
4226       }
4227     } else {
4228       Register index_reg = as_Register(index);
4229       if (disp == 0) {
4230         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4231         __ stlxr(rscratch1, src_reg, rscratch2);
4232       } else {
4233         __ lea(rscratch2, Address(base, disp));
4234         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4235         __ stlxr(rscratch1, src_reg, rscratch2);
4236       }
4237     }
4238     __ cmpw(rscratch1, zr);
4239   %}
4240 
4241   enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
4242     MacroAssembler _masm(&cbuf);
4243     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
4244     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4245                Assembler::xword, /*acquire*/ false, /*release*/ true);
4246   %}
4247 
4248   enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
4249     MacroAssembler _masm(&cbuf);
4250     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
4251     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4252                Assembler::word, /*acquire*/ false, /*release*/ true);
4253   %}
4254 
4255 
4256   // The only difference between aarch64_enc_cmpxchg and
4257   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4258   // CompareAndSwap sequence to serve as a barrier on acquiring a
4259   // lock.
4260   enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
4261     MacroAssembler _masm(&cbuf);
4262     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
4263     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4264                Assembler::xword, /*acquire*/ true, /*release*/ true);
4265   %}
4266 
4267   enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
4268     MacroAssembler _masm(&cbuf);
4269     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
4270     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4271                Assembler::word, /*acquire*/ true, /*release*/ true);
4272   %}
4273 
4274 
4275   // auxiliary used for CompareAndSwapX to set result register
4276   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4277     MacroAssembler _masm(&cbuf);
4278     Register res_reg = as_Register($res$$reg);
4279     __ cset(res_reg, Assembler::EQ);
4280   %}
4281 
4282   // prefetch encodings
4283 
4284   enc_class aarch64_enc_prefetchw(memory mem) %{
4285     MacroAssembler _masm(&cbuf);
4286     Register base = as_Register($mem$$base);
4287     int index = $mem$$index;
4288     int scale = $mem$$scale;
4289     int disp = $mem$$disp;
4290     if (index == -1) {
4291       __ prfm(Address(base, disp), PSTL1KEEP);
4292     } else {
4293       Register index_reg = as_Register(index);
4294       if (disp == 0) {
4295         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
4296       } else {
4297         __ lea(rscratch1, Address(base, disp));
4298         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
4299       }
4300     }
4301   %}
4302 
  // mov encodings
4304 
4305   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4306     MacroAssembler _masm(&cbuf);
4307     u_int32_t con = (u_int32_t)$src$$constant;
4308     Register dst_reg = as_Register($dst$$reg);
4309     if (con == 0) {
4310       __ movw(dst_reg, zr);
4311     } else {
4312       __ movw(dst_reg, con);
4313     }
4314   %}
4315 
4316   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4317     MacroAssembler _masm(&cbuf);
4318     Register dst_reg = as_Register($dst$$reg);
4319     u_int64_t con = (u_int64_t)$src$$constant;
4320     if (con == 0) {
4321       __ mov(dst_reg, zr);
4322     } else {
4323       __ mov(dst_reg, con);
4324     }
4325   %}
4326 
4327   enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
4328     MacroAssembler _masm(&cbuf);
4329     Register dst_reg = as_Register($dst$$reg);
4330     address con = (address)$src$$constant;
4331     if (con == NULL || con == (address)1) {
4332       ShouldNotReachHere();
4333     } else {
4334       relocInfo::relocType rtype = $src->constant_reloc();
4335       if (rtype == relocInfo::oop_type) {
4336         __ movoop(dst_reg, (jobject)con, /*immediate*/true);
4337       } else if (rtype == relocInfo::metadata_type) {
4338         __ mov_metadata(dst_reg, (Metadata*)con);
4339       } else {
4340         assert(rtype == relocInfo::none, "unexpected reloc type");
4341         if (con < (address)(uintptr_t)os::vm_page_size()) {
4342           __ mov(dst_reg, con);
4343         } else {
4344           unsigned long offset;
4345           __ adrp(dst_reg, con, offset);
4346           __ add(dst_reg, dst_reg, offset);
4347         }
4348       }
4349     }
4350   %}
4351 
4352   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4353     MacroAssembler _masm(&cbuf);
4354     Register dst_reg = as_Register($dst$$reg);
4355     __ mov(dst_reg, zr);
4356   %}
4357 
4358   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4359     MacroAssembler _masm(&cbuf);
4360     Register dst_reg = as_Register($dst$$reg);
4361     __ mov(dst_reg, (u_int64_t)1);
4362   %}
4363 
4364   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
4365     MacroAssembler _masm(&cbuf);
4366     address page = (address)$src$$constant;
4367     Register dst_reg = as_Register($dst$$reg);
4368     unsigned long off;
4369     __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
4370     assert(off == 0, "assumed offset == 0");
4371   %}
4372 
4373   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
4374     MacroAssembler _masm(&cbuf);
4375     __ load_byte_map_base($dst$$Register);
4376   %}
4377 
4378   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
4379     MacroAssembler _masm(&cbuf);
4380     Register dst_reg = as_Register($dst$$reg);
4381     address con = (address)$src$$constant;
4382     if (con == NULL) {
4383       ShouldNotReachHere();
4384     } else {
4385       relocInfo::relocType rtype = $src->constant_reloc();
4386       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
4387       __ set_narrow_oop(dst_reg, (jobject)con);
4388     }
4389   %}
4390 
4391   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4392     MacroAssembler _masm(&cbuf);
4393     Register dst_reg = as_Register($dst$$reg);
4394     __ mov(dst_reg, zr);
4395   %}
4396 
4397   enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
4398     MacroAssembler _masm(&cbuf);
4399     Register dst_reg = as_Register($dst$$reg);
4400     address con = (address)$src$$constant;
4401     if (con == NULL) {
4402       ShouldNotReachHere();
4403     } else {
4404       relocInfo::relocType rtype = $src->constant_reloc();
4405       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
4406       __ set_narrow_klass(dst_reg, (Klass *)con);
4407     }
4408   %}
4409 
4410   // arithmetic encodings
4411 
4412   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4413     MacroAssembler _masm(&cbuf);
4414     Register dst_reg = as_Register($dst$$reg);
4415     Register src_reg = as_Register($src1$$reg);
4416     int32_t con = (int32_t)$src2$$constant;
4417     // add has primary == 0, subtract has primary == 1
4418     if ($primary) { con = -con; }
4419     if (con < 0) {
4420       __ subw(dst_reg, src_reg, -con);
4421     } else {
4422       __ addw(dst_reg, src_reg, con);
4423     }
4424   %}
4425 
4426   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4427     MacroAssembler _masm(&cbuf);
4428     Register dst_reg = as_Register($dst$$reg);
4429     Register src_reg = as_Register($src1$$reg);
4430     int32_t con = (int32_t)$src2$$constant;
4431     // add has primary == 0, subtract has primary == 1
4432     if ($primary) { con = -con; }
4433     if (con < 0) {
4434       __ sub(dst_reg, src_reg, -con);
4435     } else {
4436       __ add(dst_reg, src_reg, con);
4437     }
4438   %}
4439 
4440   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4441     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4445     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4446   %}
4447 
4448   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4449     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4453     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4454   %}
4455 
4456   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4457     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4461     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4462   %}
4463 
4464   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4465     MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
4469     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4470   %}
4471 
4472   // compare instruction encodings
4473 
4474   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4475     MacroAssembler _masm(&cbuf);
4476     Register reg1 = as_Register($src1$$reg);
4477     Register reg2 = as_Register($src2$$reg);
4478     __ cmpw(reg1, reg2);
4479   %}
4480 
4481   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4482     MacroAssembler _masm(&cbuf);
4483     Register reg = as_Register($src1$$reg);
4484     int32_t val = $src2$$constant;
4485     if (val >= 0) {
4486       __ subsw(zr, reg, val);
4487     } else {
4488       __ addsw(zr, reg, -val);
4489     }
4490   %}
4491 
4492   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4493     MacroAssembler _masm(&cbuf);
4494     Register reg1 = as_Register($src1$$reg);
4495     u_int32_t val = (u_int32_t)$src2$$constant;
4496     __ movw(rscratch1, val);
4497     __ cmpw(reg1, rscratch1);
4498   %}
4499 
4500   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4501     MacroAssembler _masm(&cbuf);
4502     Register reg1 = as_Register($src1$$reg);
4503     Register reg2 = as_Register($src2$$reg);
4504     __ cmp(reg1, reg2);
4505   %}
4506 
4507   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
4508     MacroAssembler _masm(&cbuf);
4509     Register reg = as_Register($src1$$reg);
4510     int64_t val = $src2$$constant;
4511     if (val >= 0) {
4512       __ subs(zr, reg, val);
4513     } else if (val != -val) {
4514       __ adds(zr, reg, -val);
4515     } else {
      // aargh, Long.MIN_VALUE is a special case
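      // (-val == val here in two's complement, so neither the subs nor
      //  the adds form can encode the immediate; materialize it in
      //  rscratch1 and compare against the register instead)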
4517       __ orr(rscratch1, zr, (u_int64_t)val);
4518       __ subs(zr, reg, rscratch1);
4519     }
4520   %}
4521 
4522   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4523     MacroAssembler _masm(&cbuf);
4524     Register reg1 = as_Register($src1$$reg);
4525     u_int64_t val = (u_int64_t)$src2$$constant;
4526     __ mov(rscratch1, val);
4527     __ cmp(reg1, rscratch1);
4528   %}
4529 
4530   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4531     MacroAssembler _masm(&cbuf);
4532     Register reg1 = as_Register($src1$$reg);
4533     Register reg2 = as_Register($src2$$reg);
4534     __ cmp(reg1, reg2);
4535   %}
4536 
4537   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4538     MacroAssembler _masm(&cbuf);
4539     Register reg1 = as_Register($src1$$reg);
4540     Register reg2 = as_Register($src2$$reg);
4541     __ cmpw(reg1, reg2);
4542   %}
4543 
4544   enc_class aarch64_enc_testp(iRegP src) %{
4545     MacroAssembler _masm(&cbuf);
4546     Register reg = as_Register($src$$reg);
4547     __ cmp(reg, zr);
4548   %}
4549 
4550   enc_class aarch64_enc_testn(iRegN src) %{
4551     MacroAssembler _masm(&cbuf);
4552     Register reg = as_Register($src$$reg);
4553     __ cmpw(reg, zr);
4554   %}
4555 
4556   enc_class aarch64_enc_b(label lbl) %{
4557     MacroAssembler _masm(&cbuf);
4558     Label *L = $lbl$$label;
4559     __ b(*L);
4560   %}
4561 
4562   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4563     MacroAssembler _masm(&cbuf);
4564     Label *L = $lbl$$label;
4565     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4566   %}
4567 
4568   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4569     MacroAssembler _masm(&cbuf);
4570     Label *L = $lbl$$label;
4571     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4572   %}
4573 
4574   enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
4575   %{
4576      Register sub_reg = as_Register($sub$$reg);
4577      Register super_reg = as_Register($super$$reg);
4578      Register temp_reg = as_Register($temp$$reg);
4579      Register result_reg = as_Register($result$$reg);
4580 
4581      Label miss;
4582      MacroAssembler _masm(&cbuf);
4583      __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
4584                                      NULL, &miss,
4585                                      /*set_cond_codes:*/ true);
4586      if ($primary) {
4587        __ mov(result_reg, zr);
4588      }
4589      __ bind(miss);
4590   %}
4591 
4592   enc_class aarch64_enc_java_static_call(method meth) %{
4593     MacroAssembler _masm(&cbuf);
4594 
4595     address addr = (address)$meth$$method;
4596     address call;
4597     if (!_method) {
4598       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
4599       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
4600     } else {
4601       int method_index = resolved_method_index(cbuf);
4602       RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
4603                                                   : static_call_Relocation::spec(method_index);
4604       call = __ trampoline_call(Address(addr, rspec), &cbuf);
4605 
4606       // Emit stub for static call
4607       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
4608       if (stub == NULL) {
4609         ciEnv::current()->record_failure("CodeCache is full");
4610         return;
4611       }
4612     }
4613     if (call == NULL) {
4614       ciEnv::current()->record_failure("CodeCache is full");
4615       return;
4616     }
4617   %}
4618 
4619   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4620     MacroAssembler _masm(&cbuf);
4621     int method_index = resolved_method_index(cbuf);
4622     address call = __ ic_call((address)$meth$$method, method_index);
4623     if (call == NULL) {
4624       ciEnv::current()->record_failure("CodeCache is full");
4625       return;
4626     }
4627   %}
4628 
4629   enc_class aarch64_enc_call_epilog() %{
4630     MacroAssembler _masm(&cbuf);
4631     if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack
4633       __ call_Unimplemented();
4634     }
4635   %}
4636 
4637   enc_class aarch64_enc_java_to_runtime(method meth) %{
4638     MacroAssembler _masm(&cbuf);
4639 
    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. If so, we can call them using a br (they
    // will be in a reachable segment); otherwise we have to use a blrt,
    // which loads the absolute address into a register.
4644     address entry = (address)$meth$$method;
4645     CodeBlob *cb = CodeCache::find_blob(entry);
4646     if (cb) {
4647       address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
4648       if (call == NULL) {
4649         ciEnv::current()->record_failure("CodeCache is full");
4650         return;
4651       }
4652     } else {
4653       int gpcnt;
4654       int fpcnt;
4655       int rtype;
4656       getCallInfo(tf(), gpcnt, fpcnt, rtype);
4657       Label retaddr;
4658       __ adr(rscratch2, retaddr);
4659       __ lea(rscratch1, RuntimeAddress(entry));
4660       // Leave a breadcrumb for JavaThread::pd_last_frame().
4661       __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
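      // In outline the breadcrumb frame looks like (an illustration,
      // not extra code):
      //   sp[0] = zr        dummy saved-fp slot
      //   sp[1] = retaddr   last Java pc, found by JavaThread::pd_last_frame()
      // and is popped again straight after the blrt returns.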
4662       __ blrt(rscratch1, gpcnt, fpcnt, rtype);
4663       __ bind(retaddr);
4664       __ add(sp, sp, 2 * wordSize);
4665     }
4666   %}
4667 
4668   enc_class aarch64_enc_rethrow() %{
4669     MacroAssembler _masm(&cbuf);
4670     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
4671   %}
4672 
4673   enc_class aarch64_enc_ret() %{
4674     MacroAssembler _masm(&cbuf);
4675     __ ret(lr);
4676   %}
4677 
4678   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4679     MacroAssembler _masm(&cbuf);
4680     Register target_reg = as_Register($jump_target$$reg);
4681     __ br(target_reg);
4682   %}
4683 
4684   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4685     MacroAssembler _masm(&cbuf);
4686     Register target_reg = as_Register($jump_target$$reg);
4687     // exception oop should be in r0
4688     // ret addr has been popped into lr
4689     // callee expects it in r3
4690     __ mov(r3, lr);
4691     __ br(target_reg);
4692   %}
4693 
4694   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
4695     MacroAssembler _masm(&cbuf);
4696     Register oop = as_Register($object$$reg);
4697     Register box = as_Register($box$$reg);
4698     Register disp_hdr = as_Register($tmp$$reg);
4699     Register tmp = as_Register($tmp2$$reg);
4700     Label cont;
4701     Label object_has_monitor;
4702     Label cas_failed;
4703 
4704     assert_different_registers(oop, box, tmp, disp_hdr);
4705 
4706     // Load markOop from object into displaced_header.
4707     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
4708 
4709     // Always do locking in runtime.
4710     if (EmitSync & 0x01) {
4711       __ cmp(oop, zr);
4712       return;
4713     }
4714 
4715     if (UseBiasedLocking && !UseOptoBiasInlining) {
4716       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
4717     }
4718 
4719     // Handle existing monitor
4720     if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here, but
      // markOopDesc defines only the bit value, not a bit index,
      // so assert in case the bit position changes
4724 #     define __monitor_value_log2 1
4725       assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
4726       __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
4727 #     undef __monitor_value_log2
4728     }
4729 
4730     // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
4731     __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
4732 
4735     // Initialize the box. (Must happen before we update the object mark!)
4736     __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4737 
4738     // Compare object markOop with mark and if equal exchange scratch1
4739     // with object markOop.
4740     if (UseLSE) {
4741       __ mov(tmp, disp_hdr);
4742       __ casal(Assembler::xword, tmp, box, oop);
4743       __ cmp(tmp, disp_hdr);
4744       __ br(Assembler::EQ, cont);
4745     } else {
4746       Label retry_load;
4747       __ prfm(Address(oop), PSTL1STRM);
4748       __ bind(retry_load);
4749       __ ldaxr(tmp, oop);
4750       __ cmp(tmp, disp_hdr);
4751       __ br(Assembler::NE, cas_failed);
4752       // use stlxr to ensure update is immediately visible
4753       __ stlxr(tmp, box, oop);
4754       __ cbzw(tmp, cont);
4755       __ b(retry_load);
4756     }
4757 
4758     // Formerly:
4759     // __ cmpxchgptr(/*oldv=*/disp_hdr,
4760     //               /*newv=*/box,
4761     //               /*addr=*/oop,
4762     //               /*tmp=*/tmp,
4763     //               cont,
4764     //               /*fail*/NULL);
4765 
4766     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4767 
    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.
4770 
4771     __ bind(cas_failed);
4772     // We did not see an unlocked object so try the fast recursive case.
4773 
4774     // Check if the owner is self by comparing the value in the
4775     // markOop of object (disp_hdr) with the stack pointer.
4776     __ mov(rscratch1, sp);
4777     __ sub(disp_hdr, disp_hdr, rscratch1);
4778     __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If the result is zero the owner is our own stack frame, so we can
    // store 0 as the displaced header in the box, which indicates that
    // it is a recursive lock.
4781     __ ands(tmp/*==0?*/, disp_hdr, tmp);
4782     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4783 
4784     // Handle existing monitor.
4785     if ((EmitSync & 0x02) == 0) {
4786       __ b(cont);
4787 
4788       __ bind(object_has_monitor);
4789       // The object's monitor m is unlocked iff m->owner == NULL,
4790       // otherwise m->owner may contain a thread or a stack address.
4791       //
4792       // Try to CAS m->owner from NULL to current thread.
4793       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
4794       __ mov(disp_hdr, zr);
4795 
4796       if (UseLSE) {
4797         __ mov(rscratch1, disp_hdr);
4798         __ casal(Assembler::xword, rscratch1, rthread, tmp);
4799         __ cmp(rscratch1, disp_hdr);
4800       } else {
4801         Label retry_load, fail;
4802         __ prfm(Address(tmp), PSTL1STRM);
4803         __ bind(retry_load);
4804         __ ldaxr(rscratch1, tmp);
4805         __ cmp(disp_hdr, rscratch1);
4806         __ br(Assembler::NE, fail);
4807         // use stlxr to ensure update is immediately visible
4808         __ stlxr(rscratch1, rthread, tmp);
4809         __ cbnzw(rscratch1, retry_load);
4810         __ bind(fail);
4811       }
4812 
4813       // Label next;
4814       // __ cmpxchgptr(/*oldv=*/disp_hdr,
4815       //               /*newv=*/rthread,
4816       //               /*addr=*/tmp,
4817       //               /*tmp=*/rscratch1,
4818       //               /*succeed*/next,
4819       //               /*fail*/NULL);
4820       // __ bind(next);
4821 
4822       // store a non-null value into the box.
4823       __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4824 
4825       // PPC port checks the following invariants
4826       // #ifdef ASSERT
4827       // bne(flag, cont);
4828       // We have acquired the monitor, check some invariants.
4829       // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
4830       // Invariant 1: _recursions should be 0.
4831       // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
4832       // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
4833       //                        "monitor->_recursions should be 0", -1);
4834       // Invariant 2: OwnerIsThread shouldn't be 0.
4835       // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
4836       //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
4837       //                           "monitor->OwnerIsThread shouldn't be 0", -1);
4838       // #endif
4839     }
4840 
4841     __ bind(cont);
4842     // flag == EQ indicates success
4843     // flag == NE indicates failure
4844 
4845   %}
4846 
4847   // TODO
4848   // reimplement this with custom cmpxchgptr code
4849   // which avoids some of the unnecessary branching
4850   enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
4851     MacroAssembler _masm(&cbuf);
4852     Register oop = as_Register($object$$reg);
4853     Register box = as_Register($box$$reg);
4854     Register disp_hdr = as_Register($tmp$$reg);
4855     Register tmp = as_Register($tmp2$$reg);
4856     Label cont;
4857     Label object_has_monitor;
4858     Label cas_failed;
4859 
4860     assert_different_registers(oop, box, tmp, disp_hdr);
4861 
4862     // Always do locking in runtime.
4863     if (EmitSync & 0x01) {
4864       __ cmp(oop, zr); // Oop can't be 0 here => always false.
4865       return;
4866     }
4867 
4868     if (UseBiasedLocking && !UseOptoBiasInlining) {
4869       __ biased_locking_exit(oop, tmp, cont);
4870     }
4871 
4872     // Find the lock address and load the displaced header from the stack.
4873     __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4874 
4875     // If the displaced header is 0, we have a recursive unlock.
4876     __ cmp(disp_hdr, zr);
4877     __ br(Assembler::EQ, cont);
4878 
4879 
4880     // Handle existing monitor.
4881     if ((EmitSync & 0x02) == 0) {
4882       __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
4883       __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
4884     }
4885 
    // Check if it is still a lightweight lock; this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.
4889 
    if (UseLSE) {
      __ mov(tmp, box);
      __ casl(Assembler::xword, tmp, disp_hdr, oop);
      __ cmp(tmp, box);
    } else {
      Label retry_load;
      __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldxr(tmp, oop);
      __ cmp(box, tmp);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, disp_hdr, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }
4906 
4907     // __ cmpxchgptr(/*compare_value=*/box,
4908     //               /*exchange_value=*/disp_hdr,
4909     //               /*where=*/oop,
4910     //               /*result=*/tmp,
4911     //               cont,
4912     //               /*cas_failed*/NULL);
4913     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4914 
4915     __ bind(cas_failed);
4916 
4917     // Handle existing monitor.
4918     if ((EmitSync & 0x02) == 0) {
4919       __ b(cont);
4920 
4921       __ bind(object_has_monitor);
4922       __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
4923       __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
4924       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
4925       __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
4926       __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
4927       __ cmp(rscratch1, zr);
4928       __ br(Assembler::NE, cont);
4929 
4930       __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
4931       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
4932       __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
4933       __ cmp(rscratch1, zr);
4934       __ cbnz(rscratch1, cont);
4935       // need a release store here
4936       __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
4937       __ stlr(rscratch1, tmp); // rscratch1 is zero
4938     }
4939 
4940     __ bind(cont);
4941     // flag == EQ indicates success
4942     // flag == NE indicates failure
4943   %}
4944 
4945 %}
4946 
4947 //----------FRAME--------------------------------------------------------------
4948 // Definition of frame structure and management information.
4949 //
4950 //  S T A C K   L A Y O U T    Allocators stack-slot number
4951 //                             |   (to get allocators register number
4952 //  G  Owned by    |        |  v    add OptoReg::stack0())
4953 //  r   CALLER     |        |
4954 //  o     |        +--------+      pad to even-align allocators stack-slot
4955 //  w     V        |  pad0  |        numbers; owned by CALLER
4956 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4957 //  h     ^        |   in   |  5
4958 //        |        |  args  |  4   Holes in incoming args owned by SELF
4959 //  |     |        |        |  3
4960 //  |     |        +--------+
4961 //  V     |        | old out|      Empty on Intel, window on Sparc
4962 //        |    old |preserve|      Must be even aligned.
4963 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4964 //        |        |   in   |  3   area for Intel ret address
4965 //     Owned by    |preserve|      Empty on Sparc.
4966 //       SELF      +--------+
4967 //        |        |  pad2  |  2   pad to align old SP
4968 //        |        +--------+  1
4969 //        |        | locks  |  0
4970 //        |        +--------+----> OptoReg::stack0(), even aligned
4971 //        |        |  pad1  | 11   pad to align new SP
4972 //        |        +--------+
4973 //        |        |        | 10
4974 //        |        | spills |  9   spills
4975 //        V        |        |  8   (pad0 slot for callee)
4976 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4977 //        ^        |  out   |  7
4978 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4979 //     Owned by    +--------+
4980 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4981 //        |    new |preserve|      Must be even-aligned.
4982 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4983 //        |        |        |
4984 //
4985 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4986 //         known from SELF's arguments and the Java calling convention.
4987 //         Region 6-7 is determined per call site.
4988 // Note 2: If the calling convention leaves holes in the incoming argument
4989 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
4991 //         incoming area, as the Java calling convention is completely under
4992 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4994 //         varargs C calling conventions.
4995 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4996 //         even aligned with pad0 as needed.
4997 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4998 //           (the latter is true on Intel but is it false on AArch64?)
4999 //         region 6-11 is even aligned; it may be padded out more so that
5000 //         the region from SP to FP meets the minimum stack alignment.
5001 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5002 //         alignment.  Region 11, pad1, may be dynamically extended so that
5003 //         SP meets the minimum alignment.
5004 
5005 frame %{
5006   // What direction does stack grow in (assumed to be same for C & Java)
5007   stack_direction(TOWARDS_LOW);
5008 
5009   // These three registers define part of the calling convention
5010   // between compiled code and the interpreter.
5011 
5012   // Inline Cache Register or methodOop for I2C.
5013   inline_cache_reg(R12);
5014 
5015   // Method Oop Register when calling interpreter.
5016   interpreter_method_oop_reg(R12);
5017 
5018   // Number of stack slots consumed by locking an object
5019   sync_stack_slots(2);
5020 
5021   // Compiled code's Frame Pointer
5022   frame_pointer(R31);
5023 
5024   // Interpreter stores its frame pointer in a register which is
5025   // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted Java to compiled Java.
5027   interpreter_frame_pointer(R29);
5028 
5029   // Stack alignment requirement
5030   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
5031 
5032   // Number of stack slots between incoming argument block and the start of
5033   // a new frame.  The PROLOG must add this many slots to the stack.  The
5034   // EPILOG must remove this many slots. aarch64 needs two slots for
5035   // return address and fp.
5036   // TODO think this is correct but check
5037   in_preserve_stack_slots(4);
5038 
5039   // Number of outgoing stack slots killed above the out_preserve_stack_slots
5040   // for calls to C.  Supports the var-args backing area for register parms.
5041   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
5042 
5043   // The after-PROLOG location of the return address.  Location of
5044   // return address specifies a type (REG or STACK) and a number
5045   // representing the register number (i.e. - use a register name) or
5046   // stack slot.
5047   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
5048   // Otherwise, it is above the locks and verification slot and alignment word
5049   // TODO this may well be correct but need to check why that - 2 is there
5050   // ppc port uses 0 but we definitely need to allow for fixed_slots
5051   // which folds in the space used for monitors
5052   return_addr(STACK - 2 +
5053               round_to((Compile::current()->in_preserve_stack_slots() +
5054                         Compile::current()->fixed_slots()),
5055                        stack_alignment_in_slots()));
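  // For illustration (the numbers are assumptions, not fixed here):
  // with in_preserve_stack_slots() == 4, fixed_slots() == 0 and a
  // 16 byte stack alignment (4 slots), this evaluates to
  //   STACK - 2 + round_to(4 + 0, 4) == STACK + 2.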
5056 
5057   // Body of function which returns an integer array locating
5058   // arguments either in registers or in stack slots.  Passed an array
5059   // of ideal registers called "sig" and a "length" count.  Stack-slot
5060   // offsets are based on outgoing arguments, i.e. a CALLER setting up
5061   // arguments for a CALLEE.  Incoming stack arguments are
5062   // automatically biased by the preserve_stack_slots field above.
5063 
5064   calling_convention
5065   %{
    // No difference between ingoing/outgoing, just pass false
5067     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
5068   %}
5069 
5070   c_calling_convention
5071   %{
5072     // This is obviously always outgoing
5073     (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
5074   %}
5075 
5076   // Location of compiled Java return values.  Same as C for now.
5077   return_value
5078   %{
5079     // TODO do we allow ideal_reg == Op_RegN???
5080     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
5081            "only return normal values");
5082 
5083     static const int lo[Op_RegL + 1] = { // enum name
5084       0,                                 // Op_Node
5085       0,                                 // Op_Set
5086       R0_num,                            // Op_RegN
5087       R0_num,                            // Op_RegI
5088       R0_num,                            // Op_RegP
5089       V0_num,                            // Op_RegF
5090       V0_num,                            // Op_RegD
5091       R0_num                             // Op_RegL
5092     };
5093 
5094     static const int hi[Op_RegL + 1] = { // enum name
5095       0,                                 // Op_Node
5096       0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
5098       OptoReg::Bad,                      // Op_RegI
5099       R0_H_num,                          // Op_RegP
5100       OptoReg::Bad,                      // Op_RegF
5101       V0_H_num,                          // Op_RegD
5102       R0_H_num                           // Op_RegL
5103     };
5104 
5105     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
5106   %}
5107 %}
5108 
5109 //----------ATTRIBUTES---------------------------------------------------------
5110 //----------Operand Attributes-------------------------------------------------
5111 op_attrib op_cost(1);        // Required cost attribute
5112 
5113 //----------Instruction Attributes---------------------------------------------
5114 ins_attrib ins_cost(INSN_COST); // Required cost attribute
5115 ins_attrib ins_size(32);        // Required size attribute (in bits)
5116 ins_attrib ins_short_branch(0); // Required flag: is this instruction
5117                                 // a non-matching short branch variant
5118                                 // of some long branch?
5119 ins_attrib ins_alignment(4);    // Required alignment attribute (must
5120                                 // be a power of 2) specifies the
5121                                 // alignment that some part of the
5122                                 // instruction (not necessarily the
5123                                 // start) requires.  If > 1, a
5124                                 // compute_padding() function must be
5125                                 // provided for the instruction
5126 
5127 //----------OPERANDS-----------------------------------------------------------
5128 // Operand definitions must precede instruction definitions for correct parsing
5129 // in the ADLC because operands constitute user defined types which are used in
5130 // instruction definitions.
5131 
5132 //----------Simple Operands----------------------------------------------------
5133 
5134 // Integer operands 32 bit
5135 // 32 bit immediate
5136 operand immI()
5137 %{
5138   match(ConI);
5139 
5140   op_cost(0);
5141   format %{ %}
5142   interface(CONST_INTER);
5143 %}
5144 
5145 // 32 bit zero
5146 operand immI0()
5147 %{
5148   predicate(n->get_int() == 0);
5149   match(ConI);
5150 
5151   op_cost(0);
5152   format %{ %}
5153   interface(CONST_INTER);
5154 %}
5155 
5156 // 32 bit unit increment
5157 operand immI_1()
5158 %{
5159   predicate(n->get_int() == 1);
5160   match(ConI);
5161 
5162   op_cost(0);
5163   format %{ %}
5164   interface(CONST_INTER);
5165 %}
5166 
5167 // 32 bit unit decrement
5168 operand immI_M1()
5169 %{
5170   predicate(n->get_int() == -1);
5171   match(ConI);
5172 
5173   op_cost(0);
5174   format %{ %}
5175   interface(CONST_INTER);
5176 %}
5177 
5178 operand immI_le_4()
5179 %{
5180   predicate(n->get_int() <= 4);
5181   match(ConI);
5182 
5183   op_cost(0);
5184   format %{ %}
5185   interface(CONST_INTER);
5186 %}
5187 
5188 operand immI_31()
5189 %{
5190   predicate(n->get_int() == 31);
5191   match(ConI);
5192 
5193   op_cost(0);
5194   format %{ %}
5195   interface(CONST_INTER);
5196 %}
5197 
5198 operand immI_8()
5199 %{
5200   predicate(n->get_int() == 8);
5201   match(ConI);
5202 
5203   op_cost(0);
5204   format %{ %}
5205   interface(CONST_INTER);
5206 %}
5207 
5208 operand immI_16()
5209 %{
5210   predicate(n->get_int() == 16);
5211   match(ConI);
5212 
5213   op_cost(0);
5214   format %{ %}
5215   interface(CONST_INTER);
5216 %}
5217 
5218 operand immI_24()
5219 %{
5220   predicate(n->get_int() == 24);
5221   match(ConI);
5222 
5223   op_cost(0);
5224   format %{ %}
5225   interface(CONST_INTER);
5226 %}
5227 
5228 operand immI_32()
5229 %{
5230   predicate(n->get_int() == 32);
5231   match(ConI);
5232 
5233   op_cost(0);
5234   format %{ %}
5235   interface(CONST_INTER);
5236 %}
5237 
5238 operand immI_48()
5239 %{
5240   predicate(n->get_int() == 48);
5241   match(ConI);
5242 
5243   op_cost(0);
5244   format %{ %}
5245   interface(CONST_INTER);
5246 %}
5247 
5248 operand immI_56()
5249 %{
5250   predicate(n->get_int() == 56);
5251   match(ConI);
5252 
5253   op_cost(0);
5254   format %{ %}
5255   interface(CONST_INTER);
5256 %}
5257 
5258 operand immI_64()
5259 %{
5260   predicate(n->get_int() == 64);
5261   match(ConI);
5262 
5263   op_cost(0);
5264   format %{ %}
5265   interface(CONST_INTER);
5266 %}
5267 
5268 operand immI_255()
5269 %{
5270   predicate(n->get_int() == 255);
5271   match(ConI);
5272 
5273   op_cost(0);
5274   format %{ %}
5275   interface(CONST_INTER);
5276 %}
5277 
5278 operand immI_65535()
5279 %{
5280   predicate(n->get_int() == 65535);
5281   match(ConI);
5282 
5283   op_cost(0);
5284   format %{ %}
5285   interface(CONST_INTER);
5286 %}
5287 
5288 operand immL_63()
5289 %{
5290   predicate(n->get_int() == 63);
5291   match(ConI);
5292 
5293   op_cost(0);
5294   format %{ %}
5295   interface(CONST_INTER);
5296 %}
5297 
5298 operand immL_255()
5299 %{
5300   predicate(n->get_int() == 255);
5301   match(ConI);
5302 
5303   op_cost(0);
5304   format %{ %}
5305   interface(CONST_INTER);
5306 %}
5307 
5308 operand immL_65535()
5309 %{
5310   predicate(n->get_long() == 65535L);
5311   match(ConL);
5312 
5313   op_cost(0);
5314   format %{ %}
5315   interface(CONST_INTER);
5316 %}
5317 
5318 operand immL_4294967295()
5319 %{
5320   predicate(n->get_long() == 4294967295L);
5321   match(ConL);
5322 
5323   op_cost(0);
5324   format %{ %}
5325   interface(CONST_INTER);
5326 %}
5327 
5328 operand immL_bitmask()
5329 %{
  predicate(((n->get_long() & 0xc000000000000000L) == 0)
5331             && is_power_of_2(n->get_long() + 1));
5332   match(ConL);
5333 
5334   op_cost(0);
5335   format %{ %}
5336   interface(CONST_INTER);
5337 %}
5338 
5339 operand immI_bitmask()
5340 %{
5341   predicate(((n->get_int() & 0xc0000000) == 0)
5342             && is_power_of_2(n->get_int() + 1));
5343   match(ConI);
5344 
5345   op_cost(0);
5346   format %{ %}
5347   interface(CONST_INTER);
5348 %}
5349 
5350 // Scale values for scaled offset addressing modes (up to long but not quad)
5351 operand immIScale()
5352 %{
5353   predicate(0 <= n->get_int() && (n->get_int() <= 3));
5354   match(ConI);
5355 
5356   op_cost(0);
5357   format %{ %}
5358   interface(CONST_INTER);
5359 %}
5360 
5361 // 26 bit signed offset -- for pc-relative branches
5362 operand immI26()
5363 %{
5364   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
5365   match(ConI);
5366 
5367   op_cost(0);
5368   format %{ %}
5369   interface(CONST_INTER);
5370 %}
5371 
5372 // 19 bit signed offset -- for pc-relative loads
5373 operand immI19()
5374 %{
5375   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
5376   match(ConI);
5377 
5378   op_cost(0);
5379   format %{ %}
5380   interface(CONST_INTER);
5381 %}
5382 
5383 // 12 bit unsigned offset -- for base plus immediate loads
5384 operand immIU12()
5385 %{
5386   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
5387   match(ConI);
5388 
5389   op_cost(0);
5390   format %{ %}
5391   interface(CONST_INTER);
5392 %}
5393 
5394 operand immLU12()
5395 %{
5396   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
5397   match(ConL);
5398 
5399   op_cost(0);
5400   format %{ %}
5401   interface(CONST_INTER);
5402 %}
5403 
5404 // Offset for scaled or unscaled immediate loads and stores
5405 operand immIOffset()
5406 %{
5407   predicate(Address::offset_ok_for_immed(n->get_int()));
5408   match(ConI);
5409 
5410   op_cost(0);
5411   format %{ %}
5412   interface(CONST_INTER);
5413 %}
5414 
5415 operand immIOffset4()
5416 %{
5417   predicate(Address::offset_ok_for_immed(n->get_int(), 2));
5418   match(ConI);
5419 
5420   op_cost(0);
5421   format %{ %}
5422   interface(CONST_INTER);
5423 %}
5424 
5425 operand immIOffset8()
5426 %{
5427   predicate(Address::offset_ok_for_immed(n->get_int(), 3));
5428   match(ConI);
5429 
5430   op_cost(0);
5431   format %{ %}
5432   interface(CONST_INTER);
5433 %}
5434 
5435 operand immIOffset16()
5436 %{
5437   predicate(Address::offset_ok_for_immed(n->get_int(), 4));
5438   match(ConI);
5439 
5440   op_cost(0);
5441   format %{ %}
5442   interface(CONST_INTER);
5443 %}
5444 
5445 operand immLoffset()
5446 %{
5447   predicate(Address::offset_ok_for_immed(n->get_long()));
5448   match(ConL);
5449 
5450   op_cost(0);
5451   format %{ %}
5452   interface(CONST_INTER);
5453 %}
5454 
5455 operand immLoffset4()
5456 %{
5457   predicate(Address::offset_ok_for_immed(n->get_long(), 2));
5458   match(ConL);
5459 
5460   op_cost(0);
5461   format %{ %}
5462   interface(CONST_INTER);
5463 %}
5464 
5465 operand immLoffset8()
5466 %{
5467   predicate(Address::offset_ok_for_immed(n->get_long(), 3));
5468   match(ConL);
5469 
5470   op_cost(0);
5471   format %{ %}
5472   interface(CONST_INTER);
5473 %}
5474 
5475 operand immLoffset16()
5476 %{
5477   predicate(Address::offset_ok_for_immed(n->get_long(), 4));
5478   match(ConL);
5479 
5480   op_cost(0);
5481   format %{ %}
5482   interface(CONST_INTER);
5483 %}
5484 
5485 // 32 bit integer valid for add sub immediate
5486 operand immIAddSub()
5487 %{
5488   predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
5489   match(ConI);
5490   op_cost(0);
5491   format %{ %}
5492   interface(CONST_INTER);
5493 %}
5494 
5495 // 32 bit unsigned integer valid for logical immediate
5496 // TODO -- check this is right when e.g the mask is 0x80000000
5497 operand immILog()
5498 %{
5499   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
5500   match(ConI);
5501 
5502   op_cost(0);
5503   format %{ %}
5504   interface(CONST_INTER);
5505 %}
5506 
5507 // Integer operands 64 bit
5508 // 64 bit immediate
5509 operand immL()
5510 %{
5511   match(ConL);
5512 
5513   op_cost(0);
5514   format %{ %}
5515   interface(CONST_INTER);
5516 %}
5517 
5518 // 64 bit zero
5519 operand immL0()
5520 %{
5521   predicate(n->get_long() == 0);
5522   match(ConL);
5523 
5524   op_cost(0);
5525   format %{ %}
5526   interface(CONST_INTER);
5527 %}
5528 
5529 // 64 bit unit increment
5530 operand immL_1()
5531 %{
5532   predicate(n->get_long() == 1);
5533   match(ConL);
5534 
5535   op_cost(0);
5536   format %{ %}
5537   interface(CONST_INTER);
5538 %}
5539 
5540 // 64 bit unit decrement
5541 operand immL_M1()
5542 %{
5543   predicate(n->get_long() == -1);
5544   match(ConL);
5545 
5546   op_cost(0);
5547   format %{ %}
5548   interface(CONST_INTER);
5549 %}
5550 
5551 // 32 bit offset of pc in thread anchor
5552 
5553 operand immL_pc_off()
5554 %{
5555   predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
5556                              in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
5557   match(ConL);
5558 
5559   op_cost(0);
5560   format %{ %}
5561   interface(CONST_INTER);
5562 %}
5563 
5564 // 64 bit integer valid for add sub immediate
5565 operand immLAddSub()
5566 %{
5567   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
5568   match(ConL);
5569   op_cost(0);
5570   format %{ %}
5571   interface(CONST_INTER);
5572 %}
5573 
5574 // 64 bit integer valid for logical immediate
5575 operand immLLog()
5576 %{
5577   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
5578   match(ConL);
5579   op_cost(0);
5580   format %{ %}
5581   interface(CONST_INTER);
5582 %}
5583 
5584 // Long Immediate: low 32-bit mask
5585 operand immL_32bits()
5586 %{
5587   predicate(n->get_long() == 0xFFFFFFFFL);
5588   match(ConL);
5589   op_cost(0);
5590   format %{ %}
5591   interface(CONST_INTER);
5592 %}
5593 
5594 // Pointer operands
5595 // Pointer Immediate
5596 operand immP()
5597 %{
5598   match(ConP);
5599 
5600   op_cost(0);
5601   format %{ %}
5602   interface(CONST_INTER);
5603 %}
5604 
5605 // NULL Pointer Immediate
5606 operand immP0()
5607 %{
5608   predicate(n->get_ptr() == 0);
5609   match(ConP);
5610 
5611   op_cost(0);
5612   format %{ %}
5613   interface(CONST_INTER);
5614 %}
5615 
5616 // Pointer Immediate One
5617 // this is used in object initialization (initial object header)
5618 operand immP_1()
5619 %{
5620   predicate(n->get_ptr() == 1);
5621   match(ConP);
5622 
5623   op_cost(0);
5624   format %{ %}
5625   interface(CONST_INTER);
5626 %}
5627 
5628 // Polling Page Pointer Immediate
5629 operand immPollPage()
5630 %{
5631   predicate((address)n->get_ptr() == os::get_polling_page());
5632   match(ConP);
5633 
5634   op_cost(0);
5635   format %{ %}
5636   interface(CONST_INTER);
5637 %}
5638 
5639 // Card Table Byte Map Base
5640 operand immByteMapBase()
5641 %{
5642   // Get base of card map
5643   predicate((jbyte*)n->get_ptr() ==
5644         ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
5645   match(ConP);
5646 
5647   op_cost(0);
5648   format %{ %}
5649   interface(CONST_INTER);
5650 %}
5651 
5652 // Pointer Immediate Minus One
5653 // this is used when we want to write the current PC to the thread anchor
5654 operand immP_M1()
5655 %{
5656   predicate(n->get_ptr() == -1);
5657   match(ConP);
5658 
5659   op_cost(0);
5660   format %{ %}
5661   interface(CONST_INTER);
5662 %}
5663 
5664 // Pointer Immediate Minus Two
5665 // this is used when we want to write the current PC to the thread anchor
5666 operand immP_M2()
5667 %{
5668   predicate(n->get_ptr() == -2);
5669   match(ConP);
5670 
5671   op_cost(0);
5672   format %{ %}
5673   interface(CONST_INTER);
5674 %}
5675 
5676 // Float and Double operands
5677 // Double Immediate
5678 operand immD()
5679 %{
5680   match(ConD);
5681   op_cost(0);
5682   format %{ %}
5683   interface(CONST_INTER);
5684 %}
5685 
5686 // Double Immediate: +0.0d
5687 operand immD0()
5688 %{
5689   predicate(jlong_cast(n->getd()) == 0);
5690   match(ConD);
5691 
5692   op_cost(0);
5693   format %{ %}
5694   interface(CONST_INTER);
5695 %}
5696 
// Double Immediate: value encodable as an fmov immediate
5698 operand immDPacked()
5699 %{
5700   predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
5701   match(ConD);
5702   op_cost(0);
5703   format %{ %}
5704   interface(CONST_INTER);
5705 %}
5706 
5707 // Float Immediate
5708 operand immF()
5709 %{
5710   match(ConF);
5711   op_cost(0);
5712   format %{ %}
5713   interface(CONST_INTER);
5714 %}
5715 
5716 // Float Immediate: +0.0f.
5717 operand immF0()
5718 %{
5719   predicate(jint_cast(n->getf()) == 0);
5720   match(ConF);
5721 
5722   op_cost(0);
5723   format %{ %}
5724   interface(CONST_INTER);
5725 %}
5726 
5727 //
5728 operand immFPacked()
5729 %{
5730   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
5731   match(ConF);
5732   op_cost(0);
5733   format %{ %}
5734   interface(CONST_INTER);
5735 %}
5736 
5737 // Narrow pointer operands
5738 // Narrow Pointer Immediate
5739 operand immN()
5740 %{
5741   match(ConN);
5742 
5743   op_cost(0);
5744   format %{ %}
5745   interface(CONST_INTER);
5746 %}
5747 
5748 // Narrow NULL Pointer Immediate
5749 operand immN0()
5750 %{
5751   predicate(n->get_narrowcon() == 0);
5752   match(ConN);
5753 
5754   op_cost(0);
5755   format %{ %}
5756   interface(CONST_INTER);
5757 %}
5758 
5759 operand immNKlass()
5760 %{
5761   match(ConNKlass);
5762 
5763   op_cost(0);
5764   format %{ %}
5765   interface(CONST_INTER);
5766 %}
5767 
5768 // Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
5770 operand iRegI()
5771 %{
5772   constraint(ALLOC_IN_RC(any_reg32));
5773   match(RegI);
5774   match(iRegINoSp);
5775   op_cost(0);
5776   format %{ %}
5777   interface(REG_INTER);
5778 %}
5779 
5780 // Integer 32 bit Register not Special
5781 operand iRegINoSp()
5782 %{
5783   constraint(ALLOC_IN_RC(no_special_reg32));
5784   match(RegI);
5785   op_cost(0);
5786   format %{ %}
5787   interface(REG_INTER);
5788 %}
5789 
5790 // Integer 64 bit Register Operands
5791 // Integer 64 bit Register (includes SP)
5792 operand iRegL()
5793 %{
5794   constraint(ALLOC_IN_RC(any_reg));
5795   match(RegL);
5796   match(iRegLNoSp);
5797   op_cost(0);
5798   format %{ %}
5799   interface(REG_INTER);
5800 %}
5801 
5802 // Integer 64 bit Register not Special
5803 operand iRegLNoSp()
5804 %{
5805   constraint(ALLOC_IN_RC(no_special_reg));
5806   match(RegL);
5807   format %{ %}
5808   interface(REG_INTER);
5809 %}
5810 
5811 // Pointer Register Operands
5812 // Pointer Register
5813 operand iRegP()
5814 %{
5815   constraint(ALLOC_IN_RC(ptr_reg));
5816   match(RegP);
5817   match(iRegPNoSp);
5818   match(iRegP_R0);
5819   //match(iRegP_R2);
5820   //match(iRegP_R4);
5821   //match(iRegP_R5);
5822   match(thread_RegP);
5823   op_cost(0);
5824   format %{ %}
5825   interface(REG_INTER);
5826 %}
5827 
5828 // Pointer 64 bit Register not Special
5829 operand iRegPNoSp()
5830 %{
5831   constraint(ALLOC_IN_RC(no_special_ptr_reg));
5832   match(RegP);
5833   // match(iRegP);
5834   // match(iRegP_R0);
5835   // match(iRegP_R2);
5836   // match(iRegP_R4);
5837   // match(iRegP_R5);
5838   // match(thread_RegP);
5839   op_cost(0);
5840   format %{ %}
5841   interface(REG_INTER);
5842 %}
5843 
5844 // Pointer 64 bit Register R0 only
5845 operand iRegP_R0()
5846 %{
5847   constraint(ALLOC_IN_RC(r0_reg));
5848   match(RegP);
5849   // match(iRegP);
5850   match(iRegPNoSp);
5851   op_cost(0);
5852   format %{ %}
5853   interface(REG_INTER);
5854 %}
5855 
5856 // Pointer 64 bit Register R1 only
5857 operand iRegP_R1()
5858 %{
5859   constraint(ALLOC_IN_RC(r1_reg));
5860   match(RegP);
5861   // match(iRegP);
5862   match(iRegPNoSp);
5863   op_cost(0);
5864   format %{ %}
5865   interface(REG_INTER);
5866 %}
5867 
5868 // Pointer 64 bit Register R2 only
5869 operand iRegP_R2()
5870 %{
5871   constraint(ALLOC_IN_RC(r2_reg));
5872   match(RegP);
5873   // match(iRegP);
5874   match(iRegPNoSp);
5875   op_cost(0);
5876   format %{ %}
5877   interface(REG_INTER);
5878 %}
5879 
5880 // Pointer 64 bit Register R3 only
5881 operand iRegP_R3()
5882 %{
5883   constraint(ALLOC_IN_RC(r3_reg));
5884   match(RegP);
5885   // match(iRegP);
5886   match(iRegPNoSp);
5887   op_cost(0);
5888   format %{ %}
5889   interface(REG_INTER);
5890 %}
5891 
5892 // Pointer 64 bit Register R4 only
5893 operand iRegP_R4()
5894 %{
5895   constraint(ALLOC_IN_RC(r4_reg));
5896   match(RegP);
5897   // match(iRegP);
5898   match(iRegPNoSp);
5899   op_cost(0);
5900   format %{ %}
5901   interface(REG_INTER);
5902 %}
5903 
5904 // Pointer 64 bit Register R5 only
5905 operand iRegP_R5()
5906 %{
5907   constraint(ALLOC_IN_RC(r5_reg));
5908   match(RegP);
5909   // match(iRegP);
5910   match(iRegPNoSp);
5911   op_cost(0);
5912   format %{ %}
5913   interface(REG_INTER);
5914 %}
5915 
5916 // Pointer 64 bit Register R10 only
5917 operand iRegP_R10()
5918 %{
5919   constraint(ALLOC_IN_RC(r10_reg));
5920   match(RegP);
5921   // match(iRegP);
5922   match(iRegPNoSp);
5923   op_cost(0);
5924   format %{ %}
5925   interface(REG_INTER);
5926 %}
5927 
5928 // Long 64 bit Register R11 only
5929 operand iRegL_R11()
5930 %{
5931   constraint(ALLOC_IN_RC(r11_reg));
5932   match(RegL);
5933   match(iRegLNoSp);
5934   op_cost(0);
5935   format %{ %}
5936   interface(REG_INTER);
5937 %}
5938 
5939 // Pointer 64 bit Register FP only
5940 operand iRegP_FP()
5941 %{
5942   constraint(ALLOC_IN_RC(fp_reg));
5943   match(RegP);
5944   // match(iRegP);
5945   op_cost(0);
5946   format %{ %}
5947   interface(REG_INTER);
5948 %}
5949 
5950 // Register R0 only
5951 operand iRegI_R0()
5952 %{
5953   constraint(ALLOC_IN_RC(int_r0_reg));
5954   match(RegI);
5955   match(iRegINoSp);
5956   op_cost(0);
5957   format %{ %}
5958   interface(REG_INTER);
5959 %}
5960 
5961 // Register R2 only
5962 operand iRegI_R2()
5963 %{
5964   constraint(ALLOC_IN_RC(int_r2_reg));
5965   match(RegI);
5966   match(iRegINoSp);
5967   op_cost(0);
5968   format %{ %}
5969   interface(REG_INTER);
5970 %}
5971 
5972 // Register R3 only
5973 operand iRegI_R3()
5974 %{
5975   constraint(ALLOC_IN_RC(int_r3_reg));
5976   match(RegI);
5977   match(iRegINoSp);
5978   op_cost(0);
5979   format %{ %}
5980   interface(REG_INTER);
5981 %}
5982 
5983 
// Register R4 only
5985 operand iRegI_R4()
5986 %{
5987   constraint(ALLOC_IN_RC(int_r4_reg));
5988   match(RegI);
5989   match(iRegINoSp);
5990   op_cost(0);
5991   format %{ %}
5992   interface(REG_INTER);
5993 %}
5994 
5995 
5996 // Pointer Register Operands
5997 // Narrow Pointer Register
5998 operand iRegN()
5999 %{
6000   constraint(ALLOC_IN_RC(any_reg32));
6001   match(RegN);
6002   match(iRegNNoSp);
6003   op_cost(0);
6004   format %{ %}
6005   interface(REG_INTER);
6006 %}
6007 
// Narrow Pointer Register not Special
6009 operand iRegNNoSp()
6010 %{
6011   constraint(ALLOC_IN_RC(no_special_reg32));
6012   match(RegN);
6013   op_cost(0);
6014   format %{ %}
6015   interface(REG_INTER);
6016 %}
6017 
6018 // heap base register -- used for encoding immN0
6019 
6020 operand iRegIHeapbase()
6021 %{
6022   constraint(ALLOC_IN_RC(heapbase_reg));
6023   match(RegI);
6024   op_cost(0);
6025   format %{ %}
6026   interface(REG_INTER);
6027 %}
6028 
6029 // Float Register
6030 // Float register operands
6031 operand vRegF()
6032 %{
6033   constraint(ALLOC_IN_RC(float_reg));
6034   match(RegF);
6035 
6036   op_cost(0);
6037   format %{ %}
6038   interface(REG_INTER);
6039 %}
6040 
6041 // Double Register
6042 // Double register operands
6043 operand vRegD()
6044 %{
6045   constraint(ALLOC_IN_RC(double_reg));
6046   match(RegD);
6047 
6048   op_cost(0);
6049   format %{ %}
6050   interface(REG_INTER);
6051 %}
6052 
6053 operand vecD()
6054 %{
6055   constraint(ALLOC_IN_RC(vectord_reg));
6056   match(VecD);
6057 
6058   op_cost(0);
6059   format %{ %}
6060   interface(REG_INTER);
6061 %}
6062 
6063 operand vecX()
6064 %{
6065   constraint(ALLOC_IN_RC(vectorx_reg));
6066   match(VecX);
6067 
6068   op_cost(0);
6069   format %{ %}
6070   interface(REG_INTER);
6071 %}
6072 
6073 operand vRegD_V0()
6074 %{
6075   constraint(ALLOC_IN_RC(v0_reg));
6076   match(RegD);
6077   op_cost(0);
6078   format %{ %}
6079   interface(REG_INTER);
6080 %}
6081 
6082 operand vRegD_V1()
6083 %{
6084   constraint(ALLOC_IN_RC(v1_reg));
6085   match(RegD);
6086   op_cost(0);
6087   format %{ %}
6088   interface(REG_INTER);
6089 %}
6090 
6091 operand vRegD_V2()
6092 %{
6093   constraint(ALLOC_IN_RC(v2_reg));
6094   match(RegD);
6095   op_cost(0);
6096   format %{ %}
6097   interface(REG_INTER);
6098 %}
6099 
6100 operand vRegD_V3()
6101 %{
6102   constraint(ALLOC_IN_RC(v3_reg));
6103   match(RegD);
6104   op_cost(0);
6105   format %{ %}
6106   interface(REG_INTER);
6107 %}
6108 
6109 // Flags register, used as output of signed compare instructions
6110 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
6113 // that ordered inequality tests use GT, GE, LT or LE none of which
6114 // pass through cases where the result is unordered i.e. one or both
6115 // inputs to the compare is a NaN. this means that the ideal code can
6116 // replace e.g. a GT with an LE and not end up capturing the NaN case
6117 // (where the comparison should always fail). EQ and NE tests are
6118 // always generated in ideal code so that unordered folds into the NE
6119 // case, matching the behaviour of AArch64 NE.
6120 //
6121 // This differs from x86 where the outputs of FP compares use a
6122 // special FP flags registers and where compares based on this
6123 // register are distinguished into ordered inequalities (cmpOpUCF) and
6124 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6125 // to explicitly handle the unordered case in branches. x86 also has
6126 // to include extra CMoveX rules to accept a cmpOpUCF input.
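// For example, with x == NaN the Java tests x > y, x >= y, x < y and
// x <= y are all false, so rewriting (x > y) into !(x <= y) would be
// unsound if the chosen condition code could succeed on an unordered
// compare.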
6127 
6128 operand rFlagsReg()
6129 %{
6130   constraint(ALLOC_IN_RC(int_flags));
6131   match(RegFlags);
6132 
6133   op_cost(0);
6134   format %{ "RFLAGS" %}
6135   interface(REG_INTER);
6136 %}
6137 
6138 // Flags register, used as output of unsigned compare instructions
6139 operand rFlagsRegU()
6140 %{
6141   constraint(ALLOC_IN_RC(int_flags));
6142   match(RegFlags);
6143 
6144   op_cost(0);
6145   format %{ "RFLAGSU" %}
6146   interface(REG_INTER);
6147 %}
6148 
6149 // Special Registers
6150 
6151 // Method Register
6152 operand inline_cache_RegP(iRegP reg)
6153 %{
6154   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
6155   match(reg);
6156   match(iRegPNoSp);
6157   op_cost(0);
6158   format %{ %}
6159   interface(REG_INTER);
6160 %}
6161 
6162 operand interpreter_method_oop_RegP(iRegP reg)
6163 %{
6164   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
6165   match(reg);
6166   match(iRegPNoSp);
6167   op_cost(0);
6168   format %{ %}
6169   interface(REG_INTER);
6170 %}
6171 
6172 // Thread Register
6173 operand thread_RegP(iRegP reg)
6174 %{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
6176   match(reg);
6177   op_cost(0);
6178   format %{ %}
6179   interface(REG_INTER);
6180 %}
6181 
6182 operand lr_RegP(iRegP reg)
6183 %{
6184   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
6185   match(reg);
6186   op_cost(0);
6187   format %{ %}
6188   interface(REG_INTER);
6189 %}
6190 
6191 //----------Memory Operands----------------------------------------------------
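// In outline, the operands in this section correspond to the AArch64
// addressing forms, e.g. (register names are illustrative):
//   indirect            [x1]
//   indIndexScaled      [x1, x2, lsl #3]
//   indIndexScaledI2L   [x1, w2, sxtw #3]
//   indOffI / indOffL   [x1, #16]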
6192 
6193 operand indirect(iRegP reg)
6194 %{
6195   constraint(ALLOC_IN_RC(ptr_reg));
6196   match(reg);
6197   op_cost(0);
6198   format %{ "[$reg]" %}
6199   interface(MEMORY_INTER) %{
6200     base($reg);
6201     index(0xffffffff);
6202     scale(0x0);
6203     disp(0x0);
6204   %}
6205 %}
6206 
6207 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
6208 %{
6209   constraint(ALLOC_IN_RC(ptr_reg));
6210   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6211   match(AddP reg (LShiftL (ConvI2L ireg) scale));
6212   op_cost(0);
6213   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
6214   interface(MEMORY_INTER) %{
6215     base($reg);
6216     index($ireg);
6217     scale($scale);
6218     disp(0x0);
6219   %}
6220 %}
6221 
6222 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
6223 %{
6224   constraint(ALLOC_IN_RC(ptr_reg));
6225   predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6226   match(AddP reg (LShiftL lreg scale));
6227   op_cost(0);
6228   format %{ "$reg, $lreg lsl($scale)" %}
6229   interface(MEMORY_INTER) %{
6230     base($reg);
6231     index($lreg);
6232     scale($scale);
6233     disp(0x0);
6234   %}
6235 %}
6236 
6237 operand indIndexI2L(iRegP reg, iRegI ireg)
6238 %{
6239   constraint(ALLOC_IN_RC(ptr_reg));
6240   match(AddP reg (ConvI2L ireg));
6241   op_cost(0);
6242   format %{ "$reg, $ireg, 0, I2L" %}
6243   interface(MEMORY_INTER) %{
6244     base($reg);
6245     index($ireg);
6246     scale(0x0);
6247     disp(0x0);
6248   %}
6249 %}
6250 
6251 operand indIndex(iRegP reg, iRegL lreg)
6252 %{
6253   constraint(ALLOC_IN_RC(ptr_reg));
6254   match(AddP reg lreg);
6255   op_cost(0);
6256   format %{ "$reg, $lreg" %}
6257   interface(MEMORY_INTER) %{
6258     base($reg);
6259     index($lreg);
6260     scale(0x0);
6261     disp(0x0);
6262   %}
6263 %}
6264 
6265 operand indOffI(iRegP reg, immIOffset off)
6266 %{
6267   constraint(ALLOC_IN_RC(ptr_reg));
6268   match(AddP reg off);
6269   op_cost(0);
6270   format %{ "[$reg, $off]" %}
6271   interface(MEMORY_INTER) %{
6272     base($reg);
6273     index(0xffffffff);
6274     scale(0x0);
6275     disp($off);
6276   %}
6277 %}
6278 
6279 operand indOffI4(iRegP reg, immIOffset4 off)
6280 %{
6281   constraint(ALLOC_IN_RC(ptr_reg));
6282   match(AddP reg off);
6283   op_cost(0);
6284   format %{ "[$reg, $off]" %}
6285   interface(MEMORY_INTER) %{
6286     base($reg);
6287     index(0xffffffff);
6288     scale(0x0);
6289     disp($off);
6290   %}
6291 %}
6292 
6293 operand indOffI8(iRegP reg, immIOffset8 off)
6294 %{
6295   constraint(ALLOC_IN_RC(ptr_reg));
6296   match(AddP reg off);
6297   op_cost(0);
6298   format %{ "[$reg, $off]" %}
6299   interface(MEMORY_INTER) %{
6300     base($reg);
6301     index(0xffffffff);
6302     scale(0x0);
6303     disp($off);
6304   %}
6305 %}
6306 
6307 operand indOffI16(iRegP reg, immIOffset16 off)
6308 %{
6309   constraint(ALLOC_IN_RC(ptr_reg));
6310   match(AddP reg off);
6311   op_cost(0);
6312   format %{ "[$reg, $off]" %}
6313   interface(MEMORY_INTER) %{
6314     base($reg);
6315     index(0xffffffff);
6316     scale(0x0);
6317     disp($off);
6318   %}
6319 %}
6320 
6321 operand indOffL(iRegP reg, immLoffset off)
6322 %{
6323   constraint(ALLOC_IN_RC(ptr_reg));
6324   match(AddP reg off);
6325   op_cost(0);
6326   format %{ "[$reg, $off]" %}
6327   interface(MEMORY_INTER) %{
6328     base($reg);
6329     index(0xffffffff);
6330     scale(0x0);
6331     disp($off);
6332   %}
6333 %}
6334 
6335 operand indOffL4(iRegP reg, immLoffset4 off)
6336 %{
6337   constraint(ALLOC_IN_RC(ptr_reg));
6338   match(AddP reg off);
6339   op_cost(0);
6340   format %{ "[$reg, $off]" %}
6341   interface(MEMORY_INTER) %{
6342     base($reg);
6343     index(0xffffffff);
6344     scale(0x0);
6345     disp($off);
6346   %}
6347 %}
6348 
6349 operand indOffL8(iRegP reg, immLoffset8 off)
6350 %{
6351   constraint(ALLOC_IN_RC(ptr_reg));
6352   match(AddP reg off);
6353   op_cost(0);
6354   format %{ "[$reg, $off]" %}
6355   interface(MEMORY_INTER) %{
6356     base($reg);
6357     index(0xffffffff);
6358     scale(0x0);
6359     disp($off);
6360   %}
6361 %}
6362 
6363 operand indOffL16(iRegP reg, immLoffset16 off)
6364 %{
6365   constraint(ALLOC_IN_RC(ptr_reg));
6366   match(AddP reg off);
6367   op_cost(0);
6368   format %{ "[$reg, $off]" %}
6369   interface(MEMORY_INTER) %{
6370     base($reg);
6371     index(0xffffffff);
6372     scale(0x0);
6373     disp($off);
6374   %}
6375 %}
6376 
6377 operand indirectN(iRegN reg)
6378 %{
6379   predicate(Universe::narrow_oop_shift() == 0);
6380   constraint(ALLOC_IN_RC(ptr_reg));
6381   match(DecodeN reg);
6382   op_cost(0);
6383   format %{ "[$reg]\t# narrow" %}
6384   interface(MEMORY_INTER) %{
6385     base($reg);
6386     index(0xffffffff);
6387     scale(0x0);
6388     disp(0x0);
6389   %}
6390 %}
6391 
6392 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
6393 %{
6394   predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6395   constraint(ALLOC_IN_RC(ptr_reg));
6396   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
6397   op_cost(0);
6398   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
6399   interface(MEMORY_INTER) %{
6400     base($reg);
6401     index($ireg);
6402     scale($scale);
6403     disp(0x0);
6404   %}
6405 %}
6406 
6407 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
6408 %{
6409   predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
6410   constraint(ALLOC_IN_RC(ptr_reg));
6411   match(AddP (DecodeN reg) (LShiftL lreg scale));
6412   op_cost(0);
6413   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
6414   interface(MEMORY_INTER) %{
6415     base($reg);
6416     index($lreg);
6417     scale($scale);
6418     disp(0x0);
6419   %}
6420 %}
6421 
6422 operand indIndexI2LN(iRegN reg, iRegI ireg)
6423 %{
6424   predicate(Universe::narrow_oop_shift() == 0);
6425   constraint(ALLOC_IN_RC(ptr_reg));
6426   match(AddP (DecodeN reg) (ConvI2L ireg));
6427   op_cost(0);
6428   format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
6429   interface(MEMORY_INTER) %{
6430     base($reg);
6431     index($ireg);
6432     scale(0x0);
6433     disp(0x0);
6434   %}
6435 %}
6436 
6437 operand indIndexN(iRegN reg, iRegL lreg)
6438 %{
6439   predicate(Universe::narrow_oop_shift() == 0);
6440   constraint(ALLOC_IN_RC(ptr_reg));
6441   match(AddP (DecodeN reg) lreg);
6442   op_cost(0);
6443   format %{ "$reg, $lreg\t# narrow" %}
6444   interface(MEMORY_INTER) %{
6445     base($reg);
6446     index($lreg);
6447     scale(0x0);
6448     disp(0x0);
6449   %}
6450 %}
6451 
6452 operand indOffIN(iRegN reg, immIOffset off)
6453 %{
6454   predicate(Universe::narrow_oop_shift() == 0);
6455   constraint(ALLOC_IN_RC(ptr_reg));
6456   match(AddP (DecodeN reg) off);
6457   op_cost(0);
6458   format %{ "[$reg, $off]\t# narrow" %}
6459   interface(MEMORY_INTER) %{
6460     base($reg);
6461     index(0xffffffff);
6462     scale(0x0);
6463     disp($off);
6464   %}
6465 %}
6466 
6467 operand indOffLN(iRegN reg, immLoffset off)
6468 %{
6469   predicate(Universe::narrow_oop_shift() == 0);
6470   constraint(ALLOC_IN_RC(ptr_reg));
6471   match(AddP (DecodeN reg) off);
6472   op_cost(0);
6473   format %{ "[$reg, $off]\t# narrow" %}
6474   interface(MEMORY_INTER) %{
6475     base($reg);
6476     index(0xffffffff);
6477     scale(0x0);
6478     disp($off);
6479   %}
6480 %}
6481 
6482 
6483 
6484 // AArch64 opto stubs need to write to the pc slot in the thread anchor
6485 operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
6486 %{
6487   constraint(ALLOC_IN_RC(ptr_reg));
6488   match(AddP reg off);
6489   op_cost(0);
6490   format %{ "[$reg, $off]" %}
6491   interface(MEMORY_INTER) %{
6492     base($reg);
6493     index(0xffffffff);
6494     scale(0x0);
6495     disp($off);
6496   %}
6497 %}
6498 
6499 //----------Special Memory Operands--------------------------------------------
6500 // Stack Slot Operand - This operand is used for loading and storing temporary
6501 //                      values on the stack where a match requires a value to
6502 //                      flow through memory.
6503 operand stackSlotP(sRegP reg)
6504 %{
6505   constraint(ALLOC_IN_RC(stack_slots));
6506   op_cost(100);
6507   // No match rule because this operand is only generated in matching
6508   // match(RegP);
6509   format %{ "[$reg]" %}
6510   interface(MEMORY_INTER) %{
6511     base(0x1e);  // RSP
6512     index(0x0);  // No Index
6513     scale(0x0);  // No Scale
6514     disp($reg);  // Stack Offset
6515   %}
6516 %}
6517 
6518 operand stackSlotI(sRegI reg)
6519 %{
6520   constraint(ALLOC_IN_RC(stack_slots));
6521   // No match rule because this operand is only generated in matching
6522   // match(RegI);
6523   format %{ "[$reg]" %}
6524   interface(MEMORY_INTER) %{
6525     base(0x1e);  // RSP
6526     index(0x0);  // No Index
6527     scale(0x0);  // No Scale
6528     disp($reg);  // Stack Offset
6529   %}
6530 %}
6531 
6532 operand stackSlotF(sRegF reg)
6533 %{
6534   constraint(ALLOC_IN_RC(stack_slots));
6535   // No match rule because this operand is only generated in matching
6536   // match(RegF);
6537   format %{ "[$reg]" %}
6538   interface(MEMORY_INTER) %{
6539     base(0x1e);  // RSP
6540     index(0x0);  // No Index
6541     scale(0x0);  // No Scale
6542     disp($reg);  // Stack Offset
6543   %}
6544 %}
6545 
6546 operand stackSlotD(sRegD reg)
6547 %{
6548   constraint(ALLOC_IN_RC(stack_slots));
6549   // No match rule because this operand is only generated in matching
6550   // match(RegD);
6551   format %{ "[$reg]" %}
6552   interface(MEMORY_INTER) %{
6553     base(0x1e);  // RSP
6554     index(0x0);  // No Index
6555     scale(0x0);  // No Scale
6556     disp($reg);  // Stack Offset
6557   %}
6558 %}
6559 
6560 operand stackSlotL(sRegL reg)
6561 %{
6562   constraint(ALLOC_IN_RC(stack_slots));
6563   // No match rule because this operand is only generated in matching
6564   // match(RegL);
6565   format %{ "[$reg]" %}
6566   interface(MEMORY_INTER) %{
6567     base(0x1e);  // RSP
6568     index(0x0);  // No Index
6569     scale(0x0);  // No Scale
6570     disp($reg);  // Stack Offset
6571   %}
6572 %}
6573 
6574 // Operands for expressing Control Flow
6575 // NOTE: Label is a predefined operand which should not be redefined in
6576 //       the AD file. It is generically handled within the ADLC.
6577 
6578 //----------Conditional Branch Operands----------------------------------------
6579 // Comparison Op  - This is the operation of the comparison, and is limited to
6580 //                  the following set of codes:
6581 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6582 //
6583 // Other attributes of the comparison, such as unsignedness, are specified
6584 // by the comparison instruction that sets a condition code flags register.
6585 // That result is represented by a flags operand whose subtype is appropriate
6586 // to the unsignedness (etc.) of the comparison.
6587 //
6588 // Later, the instruction which matches both the Comparison Op (a Bool) and
6589 // the flags (produced by the Cmp) specifies the coding of the comparison op
6590 // by matching a specific subtype of Bool operand below, such as cmpOpU.
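//
// As a sketch of how these pieces fit together (illustration only: the
// concrete branch rules, and the aarch64_enc_br_con encoding class they
// use, are assumed to be defined in this file's instruction and encode
// sections rather than quoted here):
//
//   instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
//   %{
//     match(If cmp cr);    // pairs the Bool (cmp) with the flags (cr)
//     effect(USE lbl);
//     format %{ "b$cmp  $lbl" %}
//     ins_encode(aarch64_enc_br_con(cmp, lbl));
//     ins_pipe(pipe_branch_cond);
//   %}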
6591 
6592 // used for signed integral comparisons and fp comparisons
6593 
6594 operand cmpOp()
6595 %{
6596   match(Bool);
6597 
6598   format %{ "" %}
6599   interface(COND_INTER) %{
6600     equal(0x0, "eq");
6601     not_equal(0x1, "ne");
6602     less(0xb, "lt");
6603     greater_equal(0xa, "ge");
6604     less_equal(0xd, "le");
6605     greater(0xc, "gt");
6606     overflow(0x6, "vs");
6607     no_overflow(0x7, "vc");
6608   %}
6609 %}
6610 
6611 // used for unsigned integral comparisons
6612 
6613 operand cmpOpU()
6614 %{
6615   match(Bool);
6616 
6617   format %{ "" %}
6618   interface(COND_INTER) %{
6619     equal(0x0, "eq");
6620     not_equal(0x1, "ne");
6621     less(0x3, "lo");
6622     greater_equal(0x2, "hs");
6623     less_equal(0x9, "ls");
6624     greater(0x8, "hi");
6625     overflow(0x6, "vs");
6626     no_overflow(0x7, "vc");
6627   %}
6628 %}
6629 
6630 // Special operand allowing long args to int ops to be truncated for free
6631 
6632 operand iRegL2I(iRegL reg) %{
6633 
6634   op_cost(0);
6635 
6636   match(ConvL2I reg);
6637 
6638   format %{ "l2i($reg)" %}
6639 
6640   interface(REG_INTER)
6641 %}
6642 
6643 opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
6644 opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
6645 opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6646 
6647 //----------OPERAND CLASSES----------------------------------------------------
6648 // Operand Classes are groups of operands that are used to simplify
6649 // instruction definitions by not requiring the AD writer to specify
6650 // separate instructions for every form of operand when the
6651 // instruction accepts multiple operand types with the same basic
6652 // encoding and format. The classic case of this is memory operands.
6653 
6654 // memory is used to define the read/write location for load/store
6655 // instruction defs. We can turn a memory op into an Address.
6656 
6657 opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
6658                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
6659 
6660 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6661 // operations. It allows the src to be either an iRegI or a (ConvL2I
6662 // iRegL). In the latter case the l2i normally planted for a ConvL2I
6663 // can be elided because the 32-bit instruction will just employ the
6664 // lower 32 bits anyway.
6665 //
6666 // n.b. this does not elide all L2I conversions. If the truncated
6667 // value is consumed by more than one operation then the ConvL2I
6668 // cannot be bundled into the consuming nodes, so an l2i gets planted
6669 // (actually a movw $dst $src) and the downstream instructions consume
6670 // the result of the l2i as an iRegI input. That's a shame since the
6671 // movw is actually redundant, but it's not too costly.
6672 
6673 opclass iRegIorL2I(iRegI, iRegL2I);
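
// As an illustration (a sketch only; the concrete integer rules live in
// the instruction section below, so treat the exact names here as
// assumed), a 32-bit add can accept either flavour of source operand
// simply by naming the opclass:
//
//   instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
//   %{
//     match(Set dst (AddI src1 src2));
//     format %{ "addw  $dst, $src1, $src2" %}
//     ins_encode %{
//       __ addw(as_Register($dst$$reg),
//               as_Register($src1$$reg),
//               as_Register($src2$$reg));
//     %}
//     ins_pipe(ialu_reg_reg);
//   %}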
6674 
6675 //----------PIPELINE-----------------------------------------------------------
6676 // Rules which define the behavior of the target architecture's pipeline.
6677 
6678 // For specific pipelines, e.g. A53, define the stages of that pipeline
6679 //pipe_desc(ISS, EX1, EX2, WR);
6680 #define ISS S0
6681 #define EX1 S1
6682 #define EX2 S2
6683 #define WR  S3
6684 
6685 // Pipeline specification for the target architecture
6686 pipeline %{
6687 
6688 attributes %{
6689   // ARM instructions are of fixed length
6690   fixed_size_instructions;        // Fixed size instructions
6691   max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
6692   // ARM instructions come in 32-bit word units
6693   instruction_unit_size = 4;         // An instruction is 4 bytes long
6694   instruction_fetch_unit_size = 64;  // The processor fetches one line
6695   instruction_fetch_units = 1;       // of 64 bytes
6696 
6697   // List of nop instructions
6698   nops( MachNop );
6699 %}
6700 
6701 // We don't use an actual pipeline model, so we don't care about
6702 // resources or description. We do use pipeline classes to introduce
6703 // fixed latencies.
6704 
6705 //----------RESOURCES----------------------------------------------------------
6706 // Resources are the functional units available to the machine
6707 
6708 resources( INS0, INS1, INS01 = INS0 | INS1,
6709            ALU0, ALU1, ALU = ALU0 | ALU1,
6710            MAC,
6711            DIV,
6712            BRANCH,
6713            LDST,
6714            NEON_FP);
6715 
6716 //----------PIPELINE DESCRIPTION-----------------------------------------------
6717 // Pipeline Description specifies the stages in the machine's pipeline
6718 
6719 // Define the pipeline as a generic 6 stage pipeline
6720 pipe_desc(S0, S1, S2, S3, S4, S5);
6721 
6722 //----------PIPELINE CLASSES---------------------------------------------------
6723 // Pipeline Classes describe the stages in which input and output are
6724 // referenced by the hardware pipeline.
6725 
6726 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
6727 %{
6728   single_instruction;
6729   src1   : S1(read);
6730   src2   : S2(read);
6731   dst    : S5(write);
6732   INS01  : ISS;
6733   NEON_FP : S5;
6734 %}
6735 
6736 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
6737 %{
6738   single_instruction;
6739   src1   : S1(read);
6740   src2   : S2(read);
6741   dst    : S5(write);
6742   INS01  : ISS;
6743   NEON_FP : S5;
6744 %}
6745 
6746 pipe_class fp_uop_s(vRegF dst, vRegF src)
6747 %{
6748   single_instruction;
6749   src    : S1(read);
6750   dst    : S5(write);
6751   INS01  : ISS;
6752   NEON_FP : S5;
6753 %}
6754 
6755 pipe_class fp_uop_d(vRegD dst, vRegD src)
6756 %{
6757   single_instruction;
6758   src    : S1(read);
6759   dst    : S5(write);
6760   INS01  : ISS;
6761   NEON_FP : S5;
6762 %}
6763 
6764 pipe_class fp_d2f(vRegF dst, vRegD src)
6765 %{
6766   single_instruction;
6767   src    : S1(read);
6768   dst    : S5(write);
6769   INS01  : ISS;
6770   NEON_FP : S5;
6771 %}
6772 
6773 pipe_class fp_f2d(vRegD dst, vRegF src)
6774 %{
6775   single_instruction;
6776   src    : S1(read);
6777   dst    : S5(write);
6778   INS01  : ISS;
6779   NEON_FP : S5;
6780 %}
6781 
6782 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
6783 %{
6784   single_instruction;
6785   src    : S1(read);
6786   dst    : S5(write);
6787   INS01  : ISS;
6788   NEON_FP : S5;
6789 %}
6790 
6791 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
6792 %{
6793   single_instruction;
6794   src    : S1(read);
6795   dst    : S5(write);
6796   INS01  : ISS;
6797   NEON_FP : S5;
6798 %}
6799 
6800 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
6801 %{
6802   single_instruction;
6803   src    : S1(read);
6804   dst    : S5(write);
6805   INS01  : ISS;
6806   NEON_FP : S5;
6807 %}
6808 
6809 pipe_class fp_l2f(vRegF dst, iRegL src)
6810 %{
6811   single_instruction;
6812   src    : S1(read);
6813   dst    : S5(write);
6814   INS01  : ISS;
6815   NEON_FP : S5;
6816 %}
6817 
6818 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
6819 %{
6820   single_instruction;
6821   src    : S1(read);
6822   dst    : S5(write);
6823   INS01  : ISS;
6824   NEON_FP : S5;
6825 %}
6826 
6827 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
6828 %{
6829   single_instruction;
6830   src    : S1(read);
6831   dst    : S5(write);
6832   INS01  : ISS;
6833   NEON_FP : S5;
6834 %}
6835 
6836 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
6837 %{
6838   single_instruction;
6839   src    : S1(read);
6840   dst    : S5(write);
6841   INS01  : ISS;
6842   NEON_FP : S5;
6843 %}
6844 
6845 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
6846 %{
6847   single_instruction;
6848   src    : S1(read);
6849   dst    : S5(write);
6850   INS01  : ISS;
6851   NEON_FP : S5;
6852 %}
6853 
6854 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
6855 %{
6856   single_instruction;
6857   src1   : S1(read);
6858   src2   : S2(read);
6859   dst    : S5(write);
6860   INS0   : ISS;
6861   NEON_FP : S5;
6862 %}
6863 
6864 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
6865 %{
6866   single_instruction;
6867   src1   : S1(read);
6868   src2   : S2(read);
6869   dst    : S5(write);
6870   INS0   : ISS;
6871   NEON_FP : S5;
6872 %}
6873 
6874 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
6875 %{
6876   single_instruction;
6877   cr     : S1(read);
6878   src1   : S1(read);
6879   src2   : S1(read);
6880   dst    : S3(write);
6881   INS01  : ISS;
6882   NEON_FP : S3;
6883 %}
6884 
6885 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
6886 %{
6887   single_instruction;
6888   cr     : S1(read);
6889   src1   : S1(read);
6890   src2   : S1(read);
6891   dst    : S3(write);
6892   INS01  : ISS;
6893   NEON_FP : S3;
6894 %}
6895 
6896 pipe_class fp_imm_s(vRegF dst)
6897 %{
6898   single_instruction;
6899   dst    : S3(write);
6900   INS01  : ISS;
6901   NEON_FP : S3;
6902 %}
6903 
6904 pipe_class fp_imm_d(vRegD dst)
6905 %{
6906   single_instruction;
6907   dst    : S3(write);
6908   INS01  : ISS;
6909   NEON_FP : S3;
6910 %}
6911 
6912 pipe_class fp_load_constant_s(vRegF dst)
6913 %{
6914   single_instruction;
6915   dst    : S4(write);
6916   INS01  : ISS;
6917   NEON_FP : S4;
6918 %}
6919 
6920 pipe_class fp_load_constant_d(vRegD dst)
6921 %{
6922   single_instruction;
6923   dst    : S4(write);
6924   INS01  : ISS;
6925   NEON_FP : S4;
6926 %}
6927 
6928 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
6929 %{
6930   single_instruction;
6931   dst    : S5(write);
6932   src1   : S1(read);
6933   src2   : S1(read);
6934   INS01  : ISS;
6935   NEON_FP : S5;
6936 %}
6937 
6938 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
6939 %{
6940   single_instruction;
6941   dst    : S5(write);
6942   src1   : S1(read);
6943   src2   : S1(read);
6944   INS0   : ISS;
6945   NEON_FP : S5;
6946 %}
6947 
6948 pipe_class vmla64(vecD dst, vecD src1, vecD src2)
6949 %{
6950   single_instruction;
6951   dst    : S5(write);
6952   src1   : S1(read);
6953   src2   : S1(read);
6954   dst    : S1(read);
6955   INS01  : ISS;
6956   NEON_FP : S5;
6957 %}
6958 
6959 pipe_class vmla128(vecX dst, vecX src1, vecX src2)
6960 %{
6961   single_instruction;
6962   dst    : S5(write);
6963   src1   : S1(read);
6964   src2   : S1(read);
6965   dst    : S1(read);
6966   INS0   : ISS;
6967   NEON_FP : S5;
6968 %}
6969 
6970 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
6971 %{
6972   single_instruction;
6973   dst    : S4(write);
6974   src1   : S2(read);
6975   src2   : S2(read);
6976   INS01  : ISS;
6977   NEON_FP : S4;
6978 %}
6979 
6980 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
6981 %{
6982   single_instruction;
6983   dst    : S4(write);
6984   src1   : S2(read);
6985   src2   : S2(read);
6986   INS0   : ISS;
6987   NEON_FP : S4;
6988 %}
6989 
6990 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
6991 %{
6992   single_instruction;
6993   dst    : S3(write);
6994   src1   : S2(read);
6995   src2   : S2(read);
6996   INS01  : ISS;
6997   NEON_FP : S3;
6998 %}
6999 
7000 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
7001 %{
7002   single_instruction;
7003   dst    : S3(write);
7004   src1   : S2(read);
7005   src2   : S2(read);
7006   INS0   : ISS;
7007   NEON_FP : S3;
7008 %}
7009 
7010 pipe_class vshift64(vecD dst, vecD src, vecX shift)
7011 %{
7012   single_instruction;
7013   dst    : S3(write);
7014   src    : S1(read);
7015   shift  : S1(read);
7016   INS01  : ISS;
7017   NEON_FP : S3;
7018 %}
7019 
7020 pipe_class vshift128(vecX dst, vecX src, vecX shift)
7021 %{
7022   single_instruction;
7023   dst    : S3(write);
7024   src    : S1(read);
7025   shift  : S1(read);
7026   INS0   : ISS;
7027   NEON_FP : S3;
7028 %}
7029 
7030 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
7031 %{
7032   single_instruction;
7033   dst    : S3(write);
7034   src    : S1(read);
7035   INS01  : ISS;
7036   NEON_FP : S3;
7037 %}
7038 
7039 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
7040 %{
7041   single_instruction;
7042   dst    : S3(write);
7043   src    : S1(read);
7044   INS0   : ISS;
7045   NEON_FP : S3;
7046 %}
7047 
7048 pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
7049 %{
7050   single_instruction;
7051   dst    : S5(write);
7052   src1   : S1(read);
7053   src2   : S1(read);
7054   INS01  : ISS;
7055   NEON_FP : S5;
7056 %}
7057 
7058 pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
7059 %{
7060   single_instruction;
7061   dst    : S5(write);
7062   src1   : S1(read);
7063   src2   : S1(read);
7064   INS0   : ISS;
7065   NEON_FP : S5;
7066 %}
7067 
7068 pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
7069 %{
7070   single_instruction;
7071   dst    : S5(write);
7072   src1   : S1(read);
7073   src2   : S1(read);
7074   INS0   : ISS;
7075   NEON_FP : S5;
7076 %}
7077 
7078 pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
7079 %{
7080   single_instruction;
7081   dst    : S5(write);
7082   src1   : S1(read);
7083   src2   : S1(read);
7084   INS0   : ISS;
7085   NEON_FP : S5;
7086 %}
7087 
7088 pipe_class vsqrt_fp128(vecX dst, vecX src)
7089 %{
7090   single_instruction;
7091   dst    : S5(write);
7092   src    : S1(read);
7093   INS0   : ISS;
7094   NEON_FP : S5;
7095 %}
7096 
7097 pipe_class vunop_fp64(vecD dst, vecD src)
7098 %{
7099   single_instruction;
7100   dst    : S5(write);
7101   src    : S1(read);
7102   INS01  : ISS;
7103   NEON_FP : S5;
7104 %}
7105 
7106 pipe_class vunop_fp128(vecX dst, vecX src)
7107 %{
7108   single_instruction;
7109   dst    : S5(write);
7110   src    : S1(read);
7111   INS0   : ISS;
7112   NEON_FP : S5;
7113 %}
7114 
7115 pipe_class vdup_reg_reg64(vecD dst, iRegI src)
7116 %{
7117   single_instruction;
7118   dst    : S3(write);
7119   src    : S1(read);
7120   INS01  : ISS;
7121   NEON_FP : S3;
7122 %}
7123 
7124 pipe_class vdup_reg_reg128(vecX dst, iRegI src)
7125 %{
7126   single_instruction;
7127   dst    : S3(write);
7128   src    : S1(read);
7129   INS01  : ISS;
7130   NEON_FP : S3;
7131 %}
7132 
7133 pipe_class vdup_reg_freg64(vecD dst, vRegF src)
7134 %{
7135   single_instruction;
7136   dst    : S3(write);
7137   src    : S1(read);
7138   INS01  : ISS;
7139   NEON_FP : S3;
7140 %}
7141 
7142 pipe_class vdup_reg_freg128(vecX dst, vRegF src)
7143 %{
7144   single_instruction;
7145   dst    : S3(write);
7146   src    : S1(read);
7147   INS01  : ISS;
7148   NEON_FP : S3;
7149 %}
7150 
7151 pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
7152 %{
7153   single_instruction;
7154   dst    : S3(write);
7155   src    : S1(read);
7156   INS01  : ISS;
7157   NEON_FP : S3;
7158 %}
7159 
7160 pipe_class vmovi_reg_imm64(vecD dst)
7161 %{
7162   single_instruction;
7163   dst    : S3(write);
7164   INS01  : ISS;
7165   NEON_FP : S3;
7166 %}
7167 
7168 pipe_class vmovi_reg_imm128(vecX dst)
7169 %{
7170   single_instruction;
7171   dst    : S3(write);
7172   INS0   : ISS;
7173   NEON_FP : S3;
7174 %}
7175 
7176 pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
7177 %{
7178   single_instruction;
7179   dst    : S5(write);
7180   mem    : ISS(read);
7181   INS01  : ISS;
7182   NEON_FP : S3;
7183 %}
7184 
7185 pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
7186 %{
7187   single_instruction;
7188   dst    : S5(write);
7189   mem    : ISS(read);
7190   INS01  : ISS;
7191   NEON_FP : S3;
7192 %}
7193 
7194 pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
7195 %{
7196   single_instruction;
7197   mem    : ISS(read);
7198   src    : S2(read);
7199   INS01  : ISS;
7200   NEON_FP : S3;
7201 %}
7202 
7203 pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
7204 %{
7205   single_instruction;
7206   mem    : ISS(read);
7207   src    : S2(read);
7208   INS01  : ISS;
7209   NEON_FP : S3;
7210 %}
7211 
7212 //------- Integer ALU operations --------------------------
7213 
7214 // Integer ALU reg-reg operation
7215 // Operands needed in EX1, result generated in EX2
7216 // Eg.  ADD     x0, x1, x2
7217 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
7218 %{
7219   single_instruction;
7220   dst    : EX2(write);
7221   src1   : EX1(read);
7222   src2   : EX1(read);
7223   INS01  : ISS; // Dual issue as instruction 0 or 1
7224   ALU    : EX2;
7225 %}
7226 
7227 // Integer ALU reg-reg operation with constant shift
7228 // Shifted register must be available in LATE_ISS instead of EX1
7229 // Eg.  ADD     x0, x1, x2, LSL #2
7230 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
7231 %{
7232   single_instruction;
7233   dst    : EX2(write);
7234   src1   : EX1(read);
7235   src2   : ISS(read);
7236   INS01  : ISS;
7237   ALU    : EX2;
7238 %}
7239 
7240 // Integer ALU reg operation with constant shift
7241 // Eg.  LSL     x0, x1, #shift
7242 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
7243 %{
7244   single_instruction;
7245   dst    : EX2(write);
7246   src1   : ISS(read);
7247   INS01  : ISS;
7248   ALU    : EX2;
7249 %}
7250 
7251 // Integer ALU reg-reg operation with variable shift
7252 // Both operands must be available in LATE_ISS instead of EX1
7253 // Result is available in EX1 instead of EX2
7254 // Eg.  LSLV    x0, x1, x2
7255 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
7256 %{
7257   single_instruction;
7258   dst    : EX1(write);
7259   src1   : ISS(read);
7260   src2   : ISS(read);
7261   INS01  : ISS;
7262   ALU    : EX1;
7263 %}
7264 
7265 // Integer ALU reg-reg operation with extract
7266 // As for _vshift above, but result generated in EX2
7267 // Eg.  EXTR    x0, x1, x2, #N
7268 pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
7269 %{
7270   single_instruction;
7271   dst    : EX2(write);
7272   src1   : ISS(read);
7273   src2   : ISS(read);
7274   INS1   : ISS; // Can only dual issue as Instruction 1
7275   ALU    : EX2;
7276 %}
7277 
7278 // Integer ALU reg operation
7279 // Eg.  NEG     x0, x1
7280 pipe_class ialu_reg(iRegI dst, iRegI src)
7281 %{
7282   single_instruction;
7283   dst    : EX2(write);
7284   src    : EX1(read);
7285   INS01  : ISS;
7286   ALU    : EX2;
7287 %}
7288 
7289 // Integer ALU reg-immediate operation
7290 // Eg.  ADD     x0, x1, #N
7291 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
7292 %{
7293   single_instruction;
7294   dst    : EX2(write);
7295   src1   : EX1(read);
7296   INS01  : ISS;
7297   ALU    : EX2;
7298 %}
7299 
7300 // Integer ALU immediate operation (no source operands)
7301 // Eg.  MOV     x0, #N
7302 pipe_class ialu_imm(iRegI dst)
7303 %{
7304   single_instruction;
7305   dst    : EX1(write);
7306   INS01  : ISS;
7307   ALU    : EX1;
7308 %}
7309 
7310 //------- Compare operation -------------------------------
7311 
7312 // Compare reg-reg
7313 // Eg.  CMP     x0, x1
7314 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
7315 %{
7316   single_instruction;
7317 //  fixed_latency(16);
7318   cr     : EX2(write);
7319   op1    : EX1(read);
7320   op2    : EX1(read);
7321   INS01  : ISS;
7322   ALU    : EX2;
7323 %}
7324 
7325 // Compare reg-imm
7326 // Eg.  CMP     x0, #N
7327 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
7328 %{
7329   single_instruction;
7330 //  fixed_latency(16);
7331   cr     : EX2(write);
7332   op1    : EX1(read);
7333   INS01  : ISS;
7334   ALU    : EX2;
7335 %}
7336 
7337 //------- Conditional instructions ------------------------
7338 
7339 // Conditional no operands
7340 // Eg.  CSINC   x0, zr, zr, <cond>
7341 pipe_class icond_none(iRegI dst, rFlagsReg cr)
7342 %{
7343   single_instruction;
7344   cr     : EX1(read);
7345   dst    : EX2(write);
7346   INS01  : ISS;
7347   ALU    : EX2;
7348 %}
7349 
7350 // Conditional 2 operand
7351 // EG.  CSEL    X0, X1, X2, <cond>
7352 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
7353 %{
7354   single_instruction;
7355   cr     : EX1(read);
7356   src1   : EX1(read);
7357   src2   : EX1(read);
7358   dst    : EX2(write);
7359   INS01  : ISS;
7360   ALU    : EX2;
7361 %}
7362 
7363 // Conditional 1 operand
7364 // Eg.  CSEL    X0, X1, XZR, <cond>
7365 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
7366 %{
7367   single_instruction;
7368   cr     : EX1(read);
7369   src    : EX1(read);
7370   dst    : EX2(write);
7371   INS01  : ISS;
7372   ALU    : EX2;
7373 %}
7374 
7375 //------- Multiply pipeline operations --------------------
7376 
7377 // Multiply reg-reg
7378 // Eg.  MUL     w0, w1, w2
7379 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
7380 %{
7381   single_instruction;
7382   dst    : WR(write);
7383   src1   : ISS(read);
7384   src2   : ISS(read);
7385   INS01  : ISS;
7386   MAC    : WR;
7387 %}
7388 
7389 // Multiply accumulate
7390 // Eg.  MADD    w0, w1, w2, w3
7391 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
7392 %{
7393   single_instruction;
7394   dst    : WR(write);
7395   src1   : ISS(read);
7396   src2   : ISS(read);
7397   src3   : ISS(read);
7398   INS01  : ISS;
7399   MAC    : WR;
7400 %}
7401 
7402 // Eg.  MUL     x0, x1, x2
7403 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
7404 %{
7405   single_instruction;
7406   fixed_latency(3); // Maximum latency for 64 bit mul
7407   dst    : WR(write);
7408   src1   : ISS(read);
7409   src2   : ISS(read);
7410   INS01  : ISS;
7411   MAC    : WR;
7412 %}
7413 
7414 // Long multiply accumulate
7415 // Eg.  MADD    x0, x1, x2, x3
7416 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
7417 %{
7418   single_instruction;
7419   fixed_latency(3); // Maximum latency for 64 bit mul
7420   dst    : WR(write);
7421   src1   : ISS(read);
7422   src2   : ISS(read);
7423   src3   : ISS(read);
7424   INS01  : ISS;
7425   MAC    : WR;
7426 %}
7427 
7428 //------- Divide pipeline operations --------------------
7429 
7430 // Eg.  SDIV    w0, w1, w2
7431 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
7432 %{
7433   single_instruction;
7434   fixed_latency(8); // Maximum latency for 32 bit divide
7435   dst    : WR(write);
7436   src1   : ISS(read);
7437   src2   : ISS(read);
7438   INS0   : ISS; // Can only dual issue as instruction 0
7439   DIV    : WR;
7440 %}
7441 
7442 // Eg.  SDIV    x0, x1, x2
7443 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
7444 %{
7445   single_instruction;
7446   fixed_latency(16); // Maximum latency for 64 bit divide
7447   dst    : WR(write);
7448   src1   : ISS(read);
7449   src2   : ISS(read);
7450   INS0   : ISS; // Can only dual issue as instruction 0
7451   DIV    : WR;
7452 %}
7453 
7454 //------- Load pipeline operations ------------------------
7455 
7456 // Load - prefetch
7457 // Eg.  PFRM    <mem>
7458 pipe_class iload_prefetch(memory mem)
7459 %{
7460   single_instruction;
7461   mem    : ISS(read);
7462   INS01  : ISS;
7463   LDST   : WR;
7464 %}
7465 
7466 // Load - reg, mem
7467 // Eg.  LDR     x0, <mem>
7468 pipe_class iload_reg_mem(iRegI dst, memory mem)
7469 %{
7470   single_instruction;
7471   dst    : WR(write);
7472   mem    : ISS(read);
7473   INS01  : ISS;
7474   LDST   : WR;
7475 %}
7476 
7477 // Load - reg, reg
7478 // Eg.  LDR     x0, [sp, x1]
7479 pipe_class iload_reg_reg(iRegI dst, iRegI src)
7480 %{
7481   single_instruction;
7482   dst    : WR(write);
7483   src    : ISS(read);
7484   INS01  : ISS;
7485   LDST   : WR;
7486 %}
7487 
7488 //------- Store pipeline operations -----------------------
7489 
7490 // Store - zr, mem
7491 // Eg.  STR     zr, <mem>
7492 pipe_class istore_mem(memory mem)
7493 %{
7494   single_instruction;
7495   mem    : ISS(read);
7496   INS01  : ISS;
7497   LDST   : WR;
7498 %}
7499 
7500 // Store - reg, mem
7501 // Eg.  STR     x0, <mem>
7502 pipe_class istore_reg_mem(iRegI src, memory mem)
7503 %{
7504   single_instruction;
7505   mem    : ISS(read);
7506   src    : EX2(read);
7507   INS01  : ISS;
7508   LDST   : WR;
7509 %}
7510 
7511 // Store - reg, reg
7512 // Eg. STR      x0, [sp, x1]
7513 pipe_class istore_reg_reg(iRegI dst, iRegI src)
7514 %{
7515   single_instruction;
7516   dst    : ISS(read);
7517   src    : EX2(read);
7518   INS01  : ISS;
7519   LDST   : WR;
7520 %}
7521 
7522 //------- Branch pipeline operations ----------------------
7523 
7524 // Branch
7525 pipe_class pipe_branch()
7526 %{
7527   single_instruction;
7528   INS01  : ISS;
7529   BRANCH : EX1;
7530 %}
7531 
7532 // Conditional branch
7533 pipe_class pipe_branch_cond(rFlagsReg cr)
7534 %{
7535   single_instruction;
7536   cr     : EX1(read);
7537   INS01  : ISS;
7538   BRANCH : EX1;
7539 %}
7540 
7541 // Compare & Branch
7542 // EG.  CBZ/CBNZ
7543 pipe_class pipe_cmp_branch(iRegI op1)
7544 %{
7545   single_instruction;
7546   op1    : EX1(read);
7547   INS01  : ISS;
7548   BRANCH : EX1;
7549 %}
7550 
7551 //------- Synchronisation operations ----------------------
7552 
7553 // Any operation requiring serialization.
7554 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
7555 pipe_class pipe_serial()
7556 %{
7557   single_instruction;
7558   force_serialization;
7559   fixed_latency(16);
7560   INS01  : ISS(2); // Cannot dual issue with any other instruction
7561   LDST   : WR;
7562 %}
7563 
7564 // Generic big/slow expanded idiom - also serialized
7565 pipe_class pipe_slow()
7566 %{
7567   instruction_count(10);
7568   multiple_bundles;
7569   force_serialization;
7570   fixed_latency(16);
7571   INS01  : ISS(2); // Cannot dual issue with any other instruction
7572   LDST   : WR;
7573 %}
7574 
7575 // Empty pipeline class
7576 pipe_class pipe_class_empty()
7577 %{
7578   single_instruction;
7579   fixed_latency(0);
7580 %}
7581 
7582 // Default pipeline class.
7583 pipe_class pipe_class_default()
7584 %{
7585   single_instruction;
7586   fixed_latency(2);
7587 %}
7588 
7589 // Pipeline class for compares.
7590 pipe_class pipe_class_compare()
7591 %{
7592   single_instruction;
7593   fixed_latency(16);
7594 %}
7595 
7596 // Pipeline class for memory operations.
7597 pipe_class pipe_class_memory()
7598 %{
7599   single_instruction;
7600   fixed_latency(16);
7601 %}
7602 
7603 // Pipeline class for call.
7604 pipe_class pipe_class_call()
7605 %{
7606   single_instruction;
7607   fixed_latency(100);
7608 %}
7609 
7610 // Define the class for the Nop node.
7611 define %{
7612    MachNop = pipe_class_empty;
7613 %}
7614 
7615 %}
7616 //----------INSTRUCTIONS-------------------------------------------------------
7617 //
7618 // match      -- States which machine-independent subtree may be replaced
7619 //               by this instruction.
7620 // ins_cost   -- The estimated cost of this instruction is used by instruction
7621 //               selection to identify a minimum cost tree of machine
7622 //               instructions that matches a tree of machine-independent
7623 //               instructions.
7624 // format     -- A string providing the disassembly for this instruction.
7625 //               The value of an instruction's operand may be inserted
7626 //               by referring to it with a '$' prefix.
7627 // opcode     -- Three instruction opcodes may be provided.  These are referred
7628 //               to within an encode class as $primary, $secondary, and $tertiary
7629 //               respectively.  The primary opcode is commonly used to
7630 //               indicate the type of machine instruction, while secondary
7631 //               and tertiary are often used for prefix options or addressing
7632 //               modes.
7633 // ins_encode -- A list of encode classes with parameters. The encode class
7634 //               name must have been defined in an 'enc_class' specification
7635 //               in the encode section of the architecture description.
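//
// Tying these attributes together, a minimal rule has the following
// shape (a sketch only; the concrete rules below additionally carry
// predicates, and the encoding class shown is the one used by the real
// 32-bit load rule):
//
//   instruct loadI_sketch(iRegINoSp dst, memory mem)
//   %{
//     match(Set dst (LoadI mem));              // ideal subtree replaced
//     ins_cost(4 * INSN_COST);                 // guides selection
//     format %{ "ldrw  $dst, $mem" %}          // disassembly string
//     ins_encode(aarch64_enc_ldrw(dst, mem));  // enc_class + parameters
//     ins_pipe(iload_reg_mem);                 // pipeline class above
//   %}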
7636 
7637 // ============================================================================
7638 // Memory (Load/Store) Instructions
7639 
7640 // Load Instructions
7641 
7642 // Load Byte (8 bit signed)
7643 instruct loadB(iRegINoSp dst, memory mem)
7644 %{
7645   match(Set dst (LoadB mem));
7646   predicate(!needs_acquiring_load(n));
7647 
7648   ins_cost(4 * INSN_COST);
7649   format %{ "ldrsbw  $dst, $mem\t# byte" %}
7650 
7651   ins_encode(aarch64_enc_ldrsbw(dst, mem));
7652 
7653   ins_pipe(iload_reg_mem);
7654 %}
7655 
7656 // Load Byte (8 bit signed) into long
7657 instruct loadB2L(iRegLNoSp dst, memory mem)
7658 %{
7659   match(Set dst (ConvI2L (LoadB mem)));
7660   predicate(!needs_acquiring_load(n->in(1)));
7661 
7662   ins_cost(4 * INSN_COST);
7663   format %{ "ldrsb  $dst, $mem\t# byte" %}
7664 
7665   ins_encode(aarch64_enc_ldrsb(dst, mem));
7666 
7667   ins_pipe(iload_reg_mem);
7668 %}
7669 
7670 // Load Byte (8 bit unsigned)
7671 instruct loadUB(iRegINoSp dst, memory mem)
7672 %{
7673   match(Set dst (LoadUB mem));
7674   predicate(!needs_acquiring_load(n));
7675 
7676   ins_cost(4 * INSN_COST);
7677   format %{ "ldrbw  $dst, $mem\t# byte" %}
7678 
7679   ins_encode(aarch64_enc_ldrb(dst, mem));
7680 
7681   ins_pipe(iload_reg_mem);
7682 %}
7683 
7684 // Load Byte (8 bit unsigned) into long
7685 instruct loadUB2L(iRegLNoSp dst, memory mem)
7686 %{
7687   match(Set dst (ConvI2L (LoadUB mem)));
7688   predicate(!needs_acquiring_load(n->in(1)));
7689 
7690   ins_cost(4 * INSN_COST);
7691   format %{ "ldrb  $dst, $mem\t# byte" %}
7692 
7693   ins_encode(aarch64_enc_ldrb(dst, mem));
7694 
7695   ins_pipe(iload_reg_mem);
7696 %}
7697 
7698 // Load Short (16 bit signed)
7699 instruct loadS(iRegINoSp dst, memory mem)
7700 %{
7701   match(Set dst (LoadS mem));
7702   predicate(!needs_acquiring_load(n));
7703 
7704   ins_cost(4 * INSN_COST);
7705   format %{ "ldrshw  $dst, $mem\t# short" %}
7706 
7707   ins_encode(aarch64_enc_ldrshw(dst, mem));
7708 
7709   ins_pipe(iload_reg_mem);
7710 %}
7711 
7712 // Load Short (16 bit signed) into long
7713 instruct loadS2L(iRegLNoSp dst, memory mem)
7714 %{
7715   match(Set dst (ConvI2L (LoadS mem)));
7716   predicate(!needs_acquiring_load(n->in(1)));
7717 
7718   ins_cost(4 * INSN_COST);
7719   format %{ "ldrsh  $dst, $mem\t# short" %}
7720 
7721   ins_encode(aarch64_enc_ldrsh(dst, mem));
7722 
7723   ins_pipe(iload_reg_mem);
7724 %}
7725 
7726 // Load Char (16 bit unsigned)
7727 instruct loadUS(iRegINoSp dst, memory mem)
7728 %{
7729   match(Set dst (LoadUS mem));
7730   predicate(!needs_acquiring_load(n));
7731 
7732   ins_cost(4 * INSN_COST);
7733   format %{ "ldrh  $dst, $mem\t# short" %}
7734 
7735   ins_encode(aarch64_enc_ldrh(dst, mem));
7736 
7737   ins_pipe(iload_reg_mem);
7738 %}
7739 
7740 // Load Short/Char (16 bit unsigned) into long
7741 instruct loadUS2L(iRegLNoSp dst, memory mem)
7742 %{
7743   match(Set dst (ConvI2L (LoadUS mem)));
7744   predicate(!needs_acquiring_load(n->in(1)));
7745 
7746   ins_cost(4 * INSN_COST);
7747   format %{ "ldrh  $dst, $mem\t# short" %}
7748 
7749   ins_encode(aarch64_enc_ldrh(dst, mem));
7750 
7751   ins_pipe(iload_reg_mem);
7752 %}
7753 
7754 // Load Integer (32 bit signed)
7755 instruct loadI(iRegINoSp dst, memory mem)
7756 %{
7757   match(Set dst (LoadI mem));
7758   predicate(!needs_acquiring_load(n));
7759 
7760   ins_cost(4 * INSN_COST);
7761   format %{ "ldrw  $dst, $mem\t# int" %}
7762 
7763   ins_encode(aarch64_enc_ldrw(dst, mem));
7764 
7765   ins_pipe(iload_reg_mem);
7766 %}
7767 
7768 // Load Integer (32 bit signed) into long
7769 instruct loadI2L(iRegLNoSp dst, memory mem)
7770 %{
7771   match(Set dst (ConvI2L (LoadI mem)));
7772   predicate(!needs_acquiring_load(n->in(1)));
7773 
7774   ins_cost(4 * INSN_COST);
7775   format %{ "ldrsw  $dst, $mem\t# int" %}
7776 
7777   ins_encode(aarch64_enc_ldrsw(dst, mem));
7778 
7779   ins_pipe(iload_reg_mem);
7780 %}
7781 
7782 // Load Integer (32 bit unsigned) into long
7783 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
7784 %{
7785   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7786   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
7787 
7788   ins_cost(4 * INSN_COST);
7789   format %{ "ldrw  $dst, $mem\t# int" %}
7790 
7791   ins_encode(aarch64_enc_ldrw(dst, mem));
7792 
7793   ins_pipe(iload_reg_mem);
7794 %}
7795 
7796 // Load Long (64 bit signed)
7797 instruct loadL(iRegLNoSp dst, memory mem)
7798 %{
7799   match(Set dst (LoadL mem));
7800   predicate(!needs_acquiring_load(n));
7801 
7802   ins_cost(4 * INSN_COST);
7803   format %{ "ldr  $dst, $mem\t# long" %}
7804 
7805   ins_encode(aarch64_enc_ldr(dst, mem));
7806 
7807   ins_pipe(iload_reg_mem);
7808 %}
7809 
7810 // Load Range
7811 instruct loadRange(iRegINoSp dst, memory mem)
7812 %{
7813   match(Set dst (LoadRange mem));
7814 
7815   ins_cost(4 * INSN_COST);
7816   format %{ "ldrw  $dst, $mem\t# range" %}
7817 
7818   ins_encode(aarch64_enc_ldrw(dst, mem));
7819 
7820   ins_pipe(iload_reg_mem);
7821 %}
7822 
7823 // Load Pointer
7824 instruct loadP(iRegPNoSp dst, memory mem)
7825 %{
7826   match(Set dst (LoadP mem));
7827   predicate(!needs_acquiring_load(n));
7828 
7829   ins_cost(4 * INSN_COST);
7830   format %{ "ldr  $dst, $mem\t# ptr" %}
7831 
7832   ins_encode(aarch64_enc_ldr(dst, mem));
7833 
7834   ins_pipe(iload_reg_mem);
7835 %}
7836 
7837 // Load Compressed Pointer
7838 instruct loadN(iRegNNoSp dst, memory mem)
7839 %{
7840   match(Set dst (LoadN mem));
7841   predicate(!needs_acquiring_load(n));
7842 
7843   ins_cost(4 * INSN_COST);
7844   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
7845 
7846   ins_encode(aarch64_enc_ldrw(dst, mem));
7847 
7848   ins_pipe(iload_reg_mem);
7849 %}
7850 
7851 // Load Klass Pointer
7852 instruct loadKlass(iRegPNoSp dst, memory mem)
7853 %{
7854   match(Set dst (LoadKlass mem));
7855   predicate(!needs_acquiring_load(n));
7856 
7857   ins_cost(4 * INSN_COST);
7858   format %{ "ldr  $dst, $mem\t# class" %}
7859 
7860   ins_encode(aarch64_enc_ldr(dst, mem));
7861 
7862   ins_pipe(iload_reg_mem);
7863 %}
7864 
7865 // Load Narrow Klass Pointer
7866 instruct loadNKlass(iRegNNoSp dst, memory mem)
7867 %{
7868   match(Set dst (LoadNKlass mem));
7869   predicate(!needs_acquiring_load(n));
7870 
7871   ins_cost(4 * INSN_COST);
7872   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7873 
7874   ins_encode(aarch64_enc_ldrw(dst, mem));
7875 
7876   ins_pipe(iload_reg_mem);
7877 %}
7878 
7879 // Load Float
7880 instruct loadF(vRegF dst, memory mem)
7881 %{
7882   match(Set dst (LoadF mem));
7883   predicate(!needs_acquiring_load(n));
7884 
7885   ins_cost(4 * INSN_COST);
7886   format %{ "ldrs  $dst, $mem\t# float" %}
7887 
7888   ins_encode( aarch64_enc_ldrs(dst, mem) );
7889 
7890   ins_pipe(pipe_class_memory);
7891 %}
7892 
7893 // Load Double
7894 instruct loadD(vRegD dst, memory mem)
7895 %{
7896   match(Set dst (LoadD mem));
7897   predicate(!needs_acquiring_load(n));
7898 
7899   ins_cost(4 * INSN_COST);
7900   format %{ "ldrd  $dst, $mem\t# double" %}
7901 
7902   ins_encode( aarch64_enc_ldrd(dst, mem) );
7903 
7904   ins_pipe(pipe_class_memory);
7905 %}
7906 
7907 
7908 // Load Int Constant
7909 instruct loadConI(iRegINoSp dst, immI src)
7910 %{
7911   match(Set dst src);
7912 
7913   ins_cost(INSN_COST);
7914   format %{ "mov $dst, $src\t# int" %}
7915 
7916   ins_encode( aarch64_enc_movw_imm(dst, src) );
7917 
7918   ins_pipe(ialu_imm);
7919 %}
7920 
7921 // Load Long Constant
7922 instruct loadConL(iRegLNoSp dst, immL src)
7923 %{
7924   match(Set dst src);
7925 
7926   ins_cost(INSN_COST);
7927   format %{ "mov $dst, $src\t# long" %}
7928 
7929   ins_encode( aarch64_enc_mov_imm(dst, src) );
7930 
7931   ins_pipe(ialu_imm);
7932 %}
7933 
7934 // Load Pointer Constant
7935 
7936 instruct loadConP(iRegPNoSp dst, immP con)
7937 %{
7938   match(Set dst con);
7939 
7940   ins_cost(INSN_COST * 4);
7941   format %{
7942     "mov  $dst, $con\t# ptr\n\t"
7943   %}
7944 
7945   ins_encode(aarch64_enc_mov_p(dst, con));
7946 
7947   ins_pipe(ialu_imm);
7948 %}
7949 
7950 // Load Null Pointer Constant
7951 
7952 instruct loadConP0(iRegPNoSp dst, immP0 con)
7953 %{
7954   match(Set dst con);
7955 
7956   ins_cost(INSN_COST);
7957   format %{ "mov  $dst, $con\t# NULL ptr" %}
7958 
7959   ins_encode(aarch64_enc_mov_p0(dst, con));
7960 
7961   ins_pipe(ialu_imm);
7962 %}
7963 
7964 // Load Pointer Constant One
7965 
7966 instruct loadConP1(iRegPNoSp dst, immP_1 con)
7967 %{
7968   match(Set dst con);
7969 
7970   ins_cost(INSN_COST);
7971   format %{ "mov  $dst, $con\t# ptr 1" %}
7972 
7973   ins_encode(aarch64_enc_mov_p1(dst, con));
7974 
7975   ins_pipe(ialu_imm);
7976 %}
7977 
7978 // Load Poll Page Constant
7979 
7980 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
7981 %{
7982   match(Set dst con);
7983 
7984   ins_cost(INSN_COST);
7985   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
7986 
7987   ins_encode(aarch64_enc_mov_poll_page(dst, con));
7988 
7989   ins_pipe(ialu_imm);
7990 %}
7991 
7992 // Load Byte Map Base Constant
7993 
7994 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
7995 %{
7996   match(Set dst con);
7997 
7998   ins_cost(INSN_COST);
7999   format %{ "adr  $dst, $con\t# Byte Map Base" %}
8000 
8001   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
8002 
8003   ins_pipe(ialu_imm);
8004 %}
8005 
8006 // Load Narrow Pointer Constant
8007 
8008 instruct loadConN(iRegNNoSp dst, immN con)
8009 %{
8010   match(Set dst con);
8011 
8012   ins_cost(INSN_COST * 4);
8013   format %{ "mov  $dst, $con\t# compressed ptr" %}
8014 
8015   ins_encode(aarch64_enc_mov_n(dst, con));
8016 
8017   ins_pipe(ialu_imm);
8018 %}
8019 
8020 // Load Narrow Null Pointer Constant
8021 
8022 instruct loadConN0(iRegNNoSp dst, immN0 con)
8023 %{
8024   match(Set dst con);
8025 
8026   ins_cost(INSN_COST);
8027   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
8028 
8029   ins_encode(aarch64_enc_mov_n0(dst, con));
8030 
8031   ins_pipe(ialu_imm);
8032 %}
8033 
8034 // Load Narrow Klass Constant
8035 
8036 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
8037 %{
8038   match(Set dst con);
8039 
8040   ins_cost(INSN_COST);
8041   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
8042 
8043   ins_encode(aarch64_enc_mov_nk(dst, con));
8044 
8045   ins_pipe(ialu_imm);
8046 %}
8047 
8048 // Load Packed Float Constant
8049 
8050 instruct loadConF_packed(vRegF dst, immFPacked con) %{
8051   match(Set dst con);
8052   ins_cost(INSN_COST * 4);
8053   format %{ "fmovs  $dst, $con"%}
8054   ins_encode %{
8055     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
8056   %}
8057 
8058   ins_pipe(fp_imm_s);
8059 %}
8060 
8061 // Load Float Constant
8062 
8063 instruct loadConF(vRegF dst, immF con) %{
8064   match(Set dst con);
8065 
8066   ins_cost(INSN_COST * 4);
8067 
8068   format %{
8069     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
8070   %}
8071 
8072   ins_encode %{
8073     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
8074   %}
8075 
8076   ins_pipe(fp_load_constant_s);
8077 %}
8078 
8079 // Load Packed Double Constant
8080 
8081 instruct loadConD_packed(vRegD dst, immDPacked con) %{
8082   match(Set dst con);
8083   ins_cost(INSN_COST);
8084   format %{ "fmovd  $dst, $con"%}
8085   ins_encode %{
8086     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
8087   %}
8088 
8089   ins_pipe(fp_imm_d);
8090 %}
8091 
8092 // Load Double Constant
8093 
8094 instruct loadConD(vRegD dst, immD con) %{
8095   match(Set dst con);
8096 
8097   ins_cost(INSN_COST * 5);
8098   format %{
8099     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
8100   %}
8101 
8102   ins_encode %{
8103     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
8104   %}
8105 
8106   ins_pipe(fp_load_constant_d);
8107 %}
8108 
8109 // Store Instructions
8110 
8111 // Store CMS card-mark Immediate
8112 instruct storeimmCM0(immI0 zero, memory mem)
8113 %{
8114   match(Set mem (StoreCM mem zero));
8115   predicate(unnecessary_storestore(n));
8116 
8117   ins_cost(INSN_COST);
8118   format %{ "strb zr, $mem\t# byte" %}
8119 
8120   ins_encode(aarch64_enc_strb0(mem));
8121 
8122   ins_pipe(istore_mem);
8123 %}
8124 
8125 // Store CMS card-mark Immediate with intervening StoreStore
8126 // needed when using CMS with no conditional card marking
8127 instruct storeimmCM0_ordered(immI0 zero, memory mem)
8128 %{
8129   match(Set mem (StoreCM mem zero));
8130 
8131   ins_cost(INSN_COST * 2);
8132   format %{ "dmb ishst"
8133       "\n\tstrb zr, $mem\t# byte" %}
8134 
8135   ins_encode(aarch64_enc_strb0_ordered(mem));
8136 
8137   ins_pipe(istore_mem);
8138 %}
8139 
8140 // Store Byte
8141 instruct storeB(iRegIorL2I src, memory mem)
8142 %{
8143   match(Set mem (StoreB mem src));
8144   predicate(!needs_releasing_store(n));
8145 
8146   ins_cost(INSN_COST);
8147   format %{ "strb  $src, $mem\t# byte" %}
8148 
8149   ins_encode(aarch64_enc_strb(src, mem));
8150 
8151   ins_pipe(istore_reg_mem);
8152 %}
8153 
8154 
8155 instruct storeimmB0(immI0 zero, memory mem)
8156 %{
8157   match(Set mem (StoreB mem zero));
8158   predicate(!needs_releasing_store(n));
8159 
8160   ins_cost(INSN_COST);
8161   format %{ "strb zr, $mem\t# byte" %}
8162 
8163   ins_encode(aarch64_enc_strb0(mem));
8164 
8165   ins_pipe(istore_mem);
8166 %}
8167 
8168 // Store Char/Short
8169 instruct storeC(iRegIorL2I src, memory mem)
8170 %{
8171   match(Set mem (StoreC mem src));
8172   predicate(!needs_releasing_store(n));
8173 
8174   ins_cost(INSN_COST);
8175   format %{ "strh  $src, $mem\t# short" %}
8176 
8177   ins_encode(aarch64_enc_strh(src, mem));
8178 
8179   ins_pipe(istore_reg_mem);
8180 %}
8181 
8182 instruct storeimmC0(immI0 zero, memory mem)
8183 %{
8184   match(Set mem (StoreC mem zero));
8185   predicate(!needs_releasing_store(n));
8186 
8187   ins_cost(INSN_COST);
8188   format %{ "strh  zr, $mem\t# short" %}
8189 
8190   ins_encode(aarch64_enc_strh0(mem));
8191 
8192   ins_pipe(istore_mem);
8193 %}
8194 
8195 // Store Integer
8196 
8197 instruct storeI(iRegIorL2I src, memory mem)
8198 %{
8199   match(Set mem (StoreI mem src));
8200   predicate(!needs_releasing_store(n));
8201 
8202   ins_cost(INSN_COST);
8203   format %{ "strw  $src, $mem\t# int" %}
8204 
8205   ins_encode(aarch64_enc_strw(src, mem));
8206 
8207   ins_pipe(istore_reg_mem);
8208 %}
8209 
8210 instruct storeimmI0(immI0 zero, memory mem)
8211 %{
8212   match(Set mem (StoreI mem zero));
8213   predicate(!needs_releasing_store(n));
8214 
8215   ins_cost(INSN_COST);
8216   format %{ "strw  zr, $mem\t# int" %}
8217 
8218   ins_encode(aarch64_enc_strw0(mem));
8219 
8220   ins_pipe(istore_mem);
8221 %}
8222 
8223 // Store Long (64 bit signed)
8224 instruct storeL(iRegL src, memory mem)
8225 %{
8226   match(Set mem (StoreL mem src));
8227   predicate(!needs_releasing_store(n));
8228 
8229   ins_cost(INSN_COST);
8230   format %{ "str  $src, $mem\t# long" %}
8231 
8232   ins_encode(aarch64_enc_str(src, mem));
8233 
8234   ins_pipe(istore_reg_mem);
8235 %}
8236 
8237 // Store Long (64 bit signed)
8238 instruct storeimmL0(immL0 zero, memory mem)
8239 %{
8240   match(Set mem (StoreL mem zero));
8241   predicate(!needs_releasing_store(n));
8242 
8243   ins_cost(INSN_COST);
8244   format %{ "str  zr, $mem\t# long" %}
8245 
8246   ins_encode(aarch64_enc_str0(mem));
8247 
8248   ins_pipe(istore_mem);
8249 %}
8250 
8251 // Store Pointer
8252 instruct storeP(iRegP src, memory mem)
8253 %{
8254   match(Set mem (StoreP mem src));
8255   predicate(!needs_releasing_store(n));
8256 
8257   ins_cost(INSN_COST);
8258   format %{ "str  $src, $mem\t# ptr" %}
8259 
8260   ins_encode(aarch64_enc_str(src, mem));
8261 
8262   ins_pipe(istore_reg_mem);
8263 %}
8264 
8265 // Store Pointer
8266 instruct storeimmP0(immP0 zero, memory mem)
8267 %{
8268   match(Set mem (StoreP mem zero));
8269   predicate(!needs_releasing_store(n));
8270 
8271   ins_cost(INSN_COST);
8272   format %{ "str zr, $mem\t# ptr" %}
8273 
8274   ins_encode(aarch64_enc_str0(mem));
8275 
8276   ins_pipe(istore_mem);
8277 %}
8278 
8279 // Store Compressed Pointer
8280 instruct storeN(iRegN src, memory mem)
8281 %{
8282   match(Set mem (StoreN mem src));
8283   predicate(!needs_releasing_store(n));
8284 
8285   ins_cost(INSN_COST);
8286   format %{ "strw  $src, $mem\t# compressed ptr" %}
8287 
8288   ins_encode(aarch64_enc_strw(src, mem));
8289 
8290   ins_pipe(istore_reg_mem);
8291 %}
8292 
8293 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
8294 %{
8295   match(Set mem (StoreN mem zero));
8296   predicate(Universe::narrow_oop_base() == NULL &&
8297             Universe::narrow_klass_base() == NULL &&
8298             (!needs_releasing_store(n)));
8299 
8300   ins_cost(INSN_COST);
8301   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
8302 
8303   ins_encode(aarch64_enc_strw(heapbase, mem));
8304 
8305   ins_pipe(istore_reg_mem);
8306 %}
8307 
8308 // Store Float
8309 instruct storeF(vRegF src, memory mem)
8310 %{
8311   match(Set mem (StoreF mem src));
8312   predicate(!needs_releasing_store(n));
8313 
8314   ins_cost(INSN_COST);
8315   format %{ "strs  $src, $mem\t# float" %}
8316 
8317   ins_encode( aarch64_enc_strs(src, mem) );
8318 
8319   ins_pipe(pipe_class_memory);
8320 %}
8321 
8322 // TODO
8323 // implement storeImmF0 and storeFImmPacked
8324 
8325 // Store Double
8326 instruct storeD(vRegD src, memory mem)
8327 %{
8328   match(Set mem (StoreD mem src));
8329   predicate(!needs_releasing_store(n));
8330 
8331   ins_cost(INSN_COST);
8332   format %{ "strd  $src, $mem\t# double" %}
8333 
8334   ins_encode( aarch64_enc_strd(src, mem) );
8335 
8336   ins_pipe(pipe_class_memory);
8337 %}
8338 
8339 // Store Compressed Klass Pointer
8340 instruct storeNKlass(iRegN src, memory mem)
8341 %{
8342   predicate(!needs_releasing_store(n));
8343   match(Set mem (StoreNKlass mem src));
8344 
8345   ins_cost(INSN_COST);
8346   format %{ "strw  $src, $mem\t# compressed klass ptr" %}
8347 
8348   ins_encode(aarch64_enc_strw(src, mem));
8349 
8350   ins_pipe(istore_reg_mem);
8351 %}
8352 
8353 // TODO
8354 // implement storeImmD0 and storeDImmPacked
8355 
8356 // prefetch instructions
8357 // Must be safe to execute with invalid address (cannot fault).
8358 
8359 instruct prefetchalloc( memory mem ) %{
8360   match(PrefetchAllocation mem);
8361 
8362   ins_cost(INSN_COST);
8363   format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
8364 
8365   ins_encode( aarch64_enc_prefetchw(mem) );
8366 
8367   ins_pipe(iload_prefetch);
8368 %}
8369 
8370 //  ---------------- volatile loads and stores ----------------
8371 
8372 // Load Byte (8 bit signed)
8373 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8374 %{
8375   match(Set dst (LoadB mem));
8376 
8377   ins_cost(VOLATILE_REF_COST);
8378   format %{ "ldarsb  $dst, $mem\t# byte" %}
8379 
8380   ins_encode(aarch64_enc_ldarsb(dst, mem));
8381 
8382   ins_pipe(pipe_serial);
8383 %}
8384 
8385 // Load Byte (8 bit signed) into long
8386 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8387 %{
8388   match(Set dst (ConvI2L (LoadB mem)));
8389 
8390   ins_cost(VOLATILE_REF_COST);
8391   format %{ "ldarsb  $dst, $mem\t# byte" %}
8392 
8393   ins_encode(aarch64_enc_ldarsb(dst, mem));
8394 
8395   ins_pipe(pipe_serial);
8396 %}
8397 
8398 // Load Byte (8 bit unsigned)
8399 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8400 %{
8401   match(Set dst (LoadUB mem));
8402 
8403   ins_cost(VOLATILE_REF_COST);
8404   format %{ "ldarb  $dst, $mem\t# byte" %}
8405 
8406   ins_encode(aarch64_enc_ldarb(dst, mem));
8407 
8408   ins_pipe(pipe_serial);
8409 %}
8410 
8411 // Load Byte (8 bit unsigned) into long
8412 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8413 %{
8414   match(Set dst (ConvI2L (LoadUB mem)));
8415 
8416   ins_cost(VOLATILE_REF_COST);
8417   format %{ "ldarb  $dst, $mem\t# byte" %}
8418 
8419   ins_encode(aarch64_enc_ldarb(dst, mem));
8420 
8421   ins_pipe(pipe_serial);
8422 %}
8423 
8424 // Load Short (16 bit signed)
8425 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8426 %{
8427   match(Set dst (LoadS mem));
8428 
8429   ins_cost(VOLATILE_REF_COST);
8430   format %{ "ldarshw  $dst, $mem\t# short" %}
8431 
8432   ins_encode(aarch64_enc_ldarshw(dst, mem));
8433 
8434   ins_pipe(pipe_serial);
8435 %}
8436 
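// Load Char (16 bit unsigned)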
8437 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8438 %{
8439   match(Set dst (LoadUS mem));
8440 
8441   ins_cost(VOLATILE_REF_COST);
8442   format %{ "ldarhw  $dst, $mem\t# short" %}
8443 
8444   ins_encode(aarch64_enc_ldarhw(dst, mem));
8445 
8446   ins_pipe(pipe_serial);
8447 %}
8448 
8449 // Load Short/Char (16 bit unsigned) into long
8450 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8451 %{
8452   match(Set dst (ConvI2L (LoadUS mem)));
8453 
8454   ins_cost(VOLATILE_REF_COST);
8455   format %{ "ldarh  $dst, $mem\t# short" %}
8456 
8457   ins_encode(aarch64_enc_ldarh(dst, mem));
8458 
8459   ins_pipe(pipe_serial);
8460 %}
8461 
8462 // Load Short (16 bit signed) into long
8463 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8464 %{
8465   match(Set dst (ConvI2L (LoadS mem)));
8466 
8467   ins_cost(VOLATILE_REF_COST);
8468   format %{ "ldarsh  $dst, $mem\t# short" %}
8469 
8470   ins_encode(aarch64_enc_ldarsh(dst, mem));
8471 
8472   ins_pipe(pipe_serial);
8473 %}
8474 
8475 // Load Integer (32 bit signed)
8476 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8477 %{
8478   match(Set dst (LoadI mem));
8479 
8480   ins_cost(VOLATILE_REF_COST);
8481   format %{ "ldarw  $dst, $mem\t# int" %}
8482 
8483   ins_encode(aarch64_enc_ldarw(dst, mem));
8484 
8485   ins_pipe(pipe_serial);
8486 %}
8487 
8488 // Load Integer (32 bit unsigned) into long
8489 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
8490 %{
8491   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
8492 
8493   ins_cost(VOLATILE_REF_COST);
8494   format %{ "ldarw  $dst, $mem\t# int" %}
8495 
8496   ins_encode(aarch64_enc_ldarw(dst, mem));
8497 
8498   ins_pipe(pipe_serial);
8499 %}
8500 
8501 // Load Long (64 bit signed)
8502 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8503 %{
8504   match(Set dst (LoadL mem));
8505 
8506   ins_cost(VOLATILE_REF_COST);
8507   format %{ "ldar  $dst, $mem\t# int" %}
8508 
8509   ins_encode(aarch64_enc_ldar(dst, mem));
8510 
8511   ins_pipe(pipe_serial);
8512 %}
8513 
8514 // Load Pointer
8515 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
8516 %{
8517   match(Set dst (LoadP mem));
8518 
8519   ins_cost(VOLATILE_REF_COST);
8520   format %{ "ldar  $dst, $mem\t# ptr" %}
8521 
8522   ins_encode(aarch64_enc_ldar(dst, mem));
8523 
8524   ins_pipe(pipe_serial);
8525 %}
8526 
8527 // Load Compressed Pointer
8528 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
8529 %{
8530   match(Set dst (LoadN mem));
8531 
8532   ins_cost(VOLATILE_REF_COST);
8533   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
8534 
8535   ins_encode(aarch64_enc_ldarw(dst, mem));
8536 
8537   ins_pipe(pipe_serial);
8538 %}
8539 
8540 // Load Float
8541 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
8542 %{
8543   match(Set dst (LoadF mem));
8544 
8545   ins_cost(VOLATILE_REF_COST);
8546   format %{ "ldars  $dst, $mem\t# float" %}
8547 
8548   ins_encode( aarch64_enc_fldars(dst, mem) );
8549 
8550   ins_pipe(pipe_serial);
8551 %}
8552 
8553 // Load Double
8554 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
8555 %{
8556   match(Set dst (LoadD mem));
8557 
8558   ins_cost(VOLATILE_REF_COST);
8559   format %{ "ldard  $dst, $mem\t# double" %}
8560 
8561   ins_encode( aarch64_enc_fldard(dst, mem) );
8562 
8563   ins_pipe(pipe_serial);
8564 %}
8565 
8566 // Store Byte
8567 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8568 %{
8569   match(Set mem (StoreB mem src));
8570 
8571   ins_cost(VOLATILE_REF_COST);
8572   format %{ "stlrb  $src, $mem\t# byte" %}
8573 
8574   ins_encode(aarch64_enc_stlrb(src, mem));
8575 
8576   ins_pipe(pipe_class_memory);
8577 %}
8578 
8579 // Store Char/Short
8580 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8581 %{
8582   match(Set mem (StoreC mem src));
8583 
8584   ins_cost(VOLATILE_REF_COST);
8585   format %{ "stlrh  $src, $mem\t# short" %}
8586 
8587   ins_encode(aarch64_enc_stlrh(src, mem));
8588 
8589   ins_pipe(pipe_class_memory);
8590 %}
8591 
// Store Integer
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreI mem src));
8597 
8598   ins_cost(VOLATILE_REF_COST);
8599   format %{ "stlrw  $src, $mem\t# int" %}
8600 
8601   ins_encode(aarch64_enc_stlrw(src, mem));
8602 
8603   ins_pipe(pipe_class_memory);
8604 %}
8605 
8606 // Store Long (64 bit signed)
8607 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
8608 %{
8609   match(Set mem (StoreL mem src));
8610 
8611   ins_cost(VOLATILE_REF_COST);
8612   format %{ "stlr  $src, $mem\t# int" %}
8613 
8614   ins_encode(aarch64_enc_stlr(src, mem));
8615 
8616   ins_pipe(pipe_class_memory);
8617 %}
8618 
8619 // Store Pointer
8620 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
8621 %{
8622   match(Set mem (StoreP mem src));
8623 
8624   ins_cost(VOLATILE_REF_COST);
8625   format %{ "stlr  $src, $mem\t# ptr" %}
8626 
8627   ins_encode(aarch64_enc_stlr(src, mem));
8628 
8629   ins_pipe(pipe_class_memory);
8630 %}
8631 
8632 // Store Compressed Pointer
8633 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
8634 %{
8635   match(Set mem (StoreN mem src));
8636 
8637   ins_cost(VOLATILE_REF_COST);
8638   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
8639 
8640   ins_encode(aarch64_enc_stlrw(src, mem));
8641 
8642   ins_pipe(pipe_class_memory);
8643 %}
8644 
8645 // Store Float
8646 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
8647 %{
8648   match(Set mem (StoreF mem src));
8649 
8650   ins_cost(VOLATILE_REF_COST);
8651   format %{ "stlrs  $src, $mem\t# float" %}
8652 
8653   ins_encode( aarch64_enc_fstlrs(src, mem) );
8654 
8655   ins_pipe(pipe_class_memory);
8656 %}
8657 
8658 // TODO
8659 // implement storeImmF0 and storeFImmPacked
8660 
8661 // Store Double
8662 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
8663 %{
8664   match(Set mem (StoreD mem src));
8665 
8666   ins_cost(VOLATILE_REF_COST);
8667   format %{ "stlrd  $src, $mem\t# double" %}
8668 
8669   ins_encode( aarch64_enc_fstlrd(src, mem) );
8670 
8671   ins_pipe(pipe_class_memory);
8672 %}
8673 
8674 //  ---------------- end of volatile loads and stores ----------------
8675 
8676 // ============================================================================
8677 // BSWAP Instructions
8678 
8679 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
8680   match(Set dst (ReverseBytesI src));
8681 
8682   ins_cost(INSN_COST);
8683   format %{ "revw  $dst, $src" %}
8684 
8685   ins_encode %{
8686     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
8687   %}
8688 
8689   ins_pipe(ialu_reg);
8690 %}
8691 
8692 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
8693   match(Set dst (ReverseBytesL src));
8694 
8695   ins_cost(INSN_COST);
8696   format %{ "rev  $dst, $src" %}
8697 
8698   ins_encode %{
8699     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
8700   %}
8701 
8702   ins_pipe(ialu_reg);
8703 %}
8704 
8705 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
8706   match(Set dst (ReverseBytesUS src));
8707 
8708   ins_cost(INSN_COST);
8709   format %{ "rev16w  $dst, $src" %}
8710 
8711   ins_encode %{
8712     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8713   %}
8714 
8715   ins_pipe(ialu_reg);
8716 %}
8717 
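// A byte-reversed signed short needs an explicit sign extension:
// rev16w swaps the bytes within each 16-bit halfword and sbfmw then
// sign-extends bits 0..15 of the result into the full 32-bit value.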
8718 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
8719   match(Set dst (ReverseBytesS src));
8720 
8721   ins_cost(INSN_COST);
8722   format %{ "rev16w  $dst, $src\n\t"
8723             "sbfmw $dst, $dst, #0, #15" %}
8724 
8725   ins_encode %{
8726     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8727     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
8728   %}
8729 
8730   ins_pipe(ialu_reg);
8731 %}
8732 
8733 // ============================================================================
8734 // Zero Count Instructions
8735 
8736 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8737   match(Set dst (CountLeadingZerosI src));
8738 
8739   ins_cost(INSN_COST);
8740   format %{ "clzw  $dst, $src" %}
8741   ins_encode %{
8742     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
8743   %}
8744 
8745   ins_pipe(ialu_reg);
8746 %}
8747 
8748 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
8749   match(Set dst (CountLeadingZerosL src));
8750 
8751   ins_cost(INSN_COST);
8752   format %{ "clz   $dst, $src" %}
8753   ins_encode %{
8754     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
8755   %}
8756 
8757   ins_pipe(ialu_reg);
8758 %}
8759 
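// AArch64 has no count-trailing-zeros instruction, so these rules
// bit-reverse the operand with rbit and then count leading zeros.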
8760 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8761   match(Set dst (CountTrailingZerosI src));
8762 
8763   ins_cost(INSN_COST * 2);
8764   format %{ "rbitw  $dst, $src\n\t"
8765             "clzw   $dst, $dst" %}
8766   ins_encode %{
8767     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
8768     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
8769   %}
8770 
8771   ins_pipe(ialu_reg);
8772 %}
8773 
8774 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
8775   match(Set dst (CountTrailingZerosL src));
8776 
8777   ins_cost(INSN_COST * 2);
8778   format %{ "rbit   $dst, $src\n\t"
8779             "clz    $dst, $dst" %}
8780   ins_encode %{
8781     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
8782     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
8783   %}
8784 
8785   ins_pipe(ialu_reg);
8786 %}
8787 
8788 //---------- Population Count Instructions -------------------------------------
8789 //
8790 
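// AArch64 has no scalar popcount instruction, so these rules route the
// value through a SIMD register: cnt computes a per-byte bit count and
// addv then sums the eight byte-lane counts into a single value.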
8791 instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
8792   predicate(UsePopCountInstruction);
8793   match(Set dst (PopCountI src));
8794   effect(TEMP tmp);
8795   ins_cost(INSN_COST * 13);
8796 
8797   format %{ "movw   $src, $src\n\t"
8798             "mov    $tmp, $src\t# vector (1D)\n\t"
8799             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8800             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8801             "mov    $dst, $tmp\t# vector (1D)" %}
8802   ins_encode %{
8803     __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
8804     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
8805     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8806     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8807     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8808   %}
8809 
8810   ins_pipe(pipe_class_default);
8811 %}
8812 
8813 instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
8814   predicate(UsePopCountInstruction);
8815   match(Set dst (PopCountI (LoadI mem)));
8816   effect(TEMP tmp);
8817   ins_cost(INSN_COST * 13);
8818 
8819   format %{ "ldrs   $tmp, $mem\n\t"
8820             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8821             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8822             "mov    $dst, $tmp\t# vector (1D)" %}
8823   ins_encode %{
8824     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
8825     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
8826                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
8827     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8828     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8829     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8830   %}
8831 
8832   ins_pipe(pipe_class_default);
8833 %}
8834 
8835 // Note: Long.bitCount(long) returns an int.
8836 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
8837   predicate(UsePopCountInstruction);
8838   match(Set dst (PopCountL src));
8839   effect(TEMP tmp);
8840   ins_cost(INSN_COST * 13);
8841 
8842   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
8843             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8844             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8845             "mov    $dst, $tmp\t# vector (1D)" %}
8846   ins_encode %{
8847     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
8848     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8849     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8850     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8851   %}
8852 
8853   ins_pipe(pipe_class_default);
8854 %}
8855 
8856 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
8857   predicate(UsePopCountInstruction);
8858   match(Set dst (PopCountL (LoadL mem)));
8859   effect(TEMP tmp);
8860   ins_cost(INSN_COST * 13);
8861 
8862   format %{ "ldrd   $tmp, $mem\n\t"
8863             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8864             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8865             "mov    $dst, $tmp\t# vector (1D)" %}
8866   ins_encode %{
8867     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
8868     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
8869                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
8870     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8871     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8872     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8873   %}
8874 
8875   ins_pipe(pipe_class_default);
8876 %}
8877 
8878 // ============================================================================
8879 // MemBar Instruction
8880 
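// The unnecessary_membar_* rules below match the same ideal barrier
// nodes as the full barriers but at zero cost: their predicates fire
// when the required ordering is already provided by a neighbouring
// ldar/stlr or CAS sequence, so all they emit is a block comment.
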
8881 instruct load_fence() %{
8882   match(LoadFence);
8883   ins_cost(VOLATILE_REF_COST);
8884 
8885   format %{ "load_fence" %}
8886 
8887   ins_encode %{
8888     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8889   %}
8890   ins_pipe(pipe_serial);
8891 %}
8892 
8893 instruct unnecessary_membar_acquire() %{
8894   predicate(unnecessary_acquire(n));
8895   match(MemBarAcquire);
8896   ins_cost(0);
8897 
8898   format %{ "membar_acquire (elided)" %}
8899 
8900   ins_encode %{
8901     __ block_comment("membar_acquire (elided)");
8902   %}
8903 
8904   ins_pipe(pipe_class_empty);
8905 %}
8906 
8907 instruct membar_acquire() %{
8908   match(MemBarAcquire);
8909   ins_cost(VOLATILE_REF_COST);
8910 
8911   format %{ "membar_acquire" %}
8912 
8913   ins_encode %{
8914     __ block_comment("membar_acquire");
8915     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8916   %}
8917 
8918   ins_pipe(pipe_serial);
8919 %}
8920 
8921 
8922 instruct membar_acquire_lock() %{
8923   match(MemBarAcquireLock);
8924   ins_cost(VOLATILE_REF_COST);
8925 
8926   format %{ "membar_acquire_lock (elided)" %}
8927 
8928   ins_encode %{
8929     __ block_comment("membar_acquire_lock (elided)");
8930   %}
8931 
8932   ins_pipe(pipe_serial);
8933 %}
8934 
8935 instruct store_fence() %{
8936   match(StoreFence);
8937   ins_cost(VOLATILE_REF_COST);
8938 
8939   format %{ "store_fence" %}
8940 
8941   ins_encode %{
8942     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8943   %}
8944   ins_pipe(pipe_serial);
8945 %}
8946 
8947 instruct unnecessary_membar_release() %{
8948   predicate(unnecessary_release(n));
8949   match(MemBarRelease);
8950   ins_cost(0);
8951 
8952   format %{ "membar_release (elided)" %}
8953 
8954   ins_encode %{
8955     __ block_comment("membar_release (elided)");
8956   %}
8957   ins_pipe(pipe_serial);
8958 %}
8959 
8960 instruct membar_release() %{
8961   match(MemBarRelease);
8962   ins_cost(VOLATILE_REF_COST);
8963 
8964   format %{ "membar_release" %}
8965 
8966   ins_encode %{
8967     __ block_comment("membar_release");
8968     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8969   %}
8970   ins_pipe(pipe_serial);
8971 %}
8972 
8973 instruct membar_storestore() %{
8974   match(MemBarStoreStore);
8975   ins_cost(VOLATILE_REF_COST);
8976 
8977   format %{ "MEMBAR-store-store" %}
8978 
8979   ins_encode %{
8980     __ membar(Assembler::StoreStore);
8981   %}
8982   ins_pipe(pipe_serial);
8983 %}
8984 
8985 instruct membar_release_lock() %{
8986   match(MemBarReleaseLock);
8987   ins_cost(VOLATILE_REF_COST);
8988 
8989   format %{ "membar_release_lock (elided)" %}
8990 
8991   ins_encode %{
8992     __ block_comment("membar_release_lock (elided)");
8993   %}
8994 
8995   ins_pipe(pipe_serial);
8996 %}
8997 
8998 instruct unnecessary_membar_volatile() %{
8999   predicate(unnecessary_volatile(n));
9000   match(MemBarVolatile);
9001   ins_cost(0);
9002 
9003   format %{ "membar_volatile (elided)" %}
9004 
9005   ins_encode %{
9006     __ block_comment("membar_volatile (elided)");
9007   %}
9008 
9009   ins_pipe(pipe_serial);
9010 %}
9011 
9012 instruct membar_volatile() %{
9013   match(MemBarVolatile);
9014   ins_cost(VOLATILE_REF_COST*100);
9015 
9016   format %{ "membar_volatile" %}
9017 
9018   ins_encode %{
9019     __ block_comment("membar_volatile");
9020     __ membar(Assembler::StoreLoad);
9021   %}
9022 
9023   ins_pipe(pipe_serial);
9024 %}
9025 
9026 // ============================================================================
9027 // Cast/Convert Instructions
9028 
9029 instruct castX2P(iRegPNoSp dst, iRegL src) %{
9030   match(Set dst (CastX2P src));
9031 
9032   ins_cost(INSN_COST);
9033   format %{ "mov $dst, $src\t# long -> ptr" %}
9034 
9035   ins_encode %{
9036     if ($dst$$reg != $src$$reg) {
9037       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
9038     }
9039   %}
9040 
9041   ins_pipe(ialu_reg);
9042 %}
9043 
9044 instruct castP2X(iRegLNoSp dst, iRegP src) %{
9045   match(Set dst (CastP2X src));
9046 
9047   ins_cost(INSN_COST);
9048   format %{ "mov $dst, $src\t# ptr -> long" %}
9049 
9050   ins_encode %{
9051     if ($dst$$reg != $src$$reg) {
9052       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
9053     }
9054   %}
9055 
9056   ins_pipe(ialu_reg);
9057 %}
9058 
9059 // Convert oop into int for vectors alignment masking
9060 instruct convP2I(iRegINoSp dst, iRegP src) %{
9061   match(Set dst (ConvL2I (CastP2X src)));
9062 
9063   ins_cost(INSN_COST);
9064   format %{ "movw $dst, $src\t# ptr -> int" %}
9065   ins_encode %{
9066     __ movw($dst$$Register, $src$$Register);
9067   %}
9068 
9069   ins_pipe(ialu_reg);
9070 %}
9071 
9072 // Convert compressed oop into int for vectors alignment masking
9073 // in case of 32bit oops (heap < 4Gb).
9074 instruct convN2I(iRegINoSp dst, iRegN src)
9075 %{
9076   predicate(Universe::narrow_oop_shift() == 0);
9077   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
9078 
9079   ins_cost(INSN_COST);
9080   format %{ "mov dst, $src\t# compressed ptr -> int" %}
9081   ins_encode %{
9082     __ movw($dst$$Register, $src$$Register);
9083   %}
9084 
9085   ins_pipe(ialu_reg);
9086 %}
9087 
9088 
9089 // Convert oop pointer into compressed form
9090 instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
9091   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
9092   match(Set dst (EncodeP src));
9093   effect(KILL cr);
9094   ins_cost(INSN_COST * 3);
9095   format %{ "encode_heap_oop $dst, $src" %}
9096   ins_encode %{
9097     Register s = $src$$Register;
9098     Register d = $dst$$Register;
9099     __ encode_heap_oop(d, s);
9100   %}
9101   ins_pipe(ialu_reg);
9102 %}
9103 
9104 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
9105   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
9106   match(Set dst (EncodeP src));
9107   ins_cost(INSN_COST * 3);
9108   format %{ "encode_heap_oop_not_null $dst, $src" %}
9109   ins_encode %{
9110     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
9111   %}
9112   ins_pipe(ialu_reg);
9113 %}
9114 
9115 instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
9116   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
9117             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
9118   match(Set dst (DecodeN src));
9119   ins_cost(INSN_COST * 3);
9120   format %{ "decode_heap_oop $dst, $src" %}
9121   ins_encode %{
9122     Register s = $src$$Register;
9123     Register d = $dst$$Register;
9124     __ decode_heap_oop(d, s);
9125   %}
9126   ins_pipe(ialu_reg);
9127 %}
9128 
9129 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
9130   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
9131             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
9132   match(Set dst (DecodeN src));
9133   ins_cost(INSN_COST * 3);
9134   format %{ "decode_heap_oop_not_null $dst, $src" %}
9135   ins_encode %{
9136     Register s = $src$$Register;
9137     Register d = $dst$$Register;
9138     __ decode_heap_oop_not_null(d, s);
9139   %}
9140   ins_pipe(ialu_reg);
9141 %}
9142 
9143 // n.b. AArch64 implementations of encode_klass_not_null and
9144 // decode_klass_not_null do not modify the flags register so, unlike
9145 // Intel, we don't kill CR as a side effect here
9146 
9147 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
9148   match(Set dst (EncodePKlass src));
9149 
9150   ins_cost(INSN_COST * 3);
9151   format %{ "encode_klass_not_null $dst,$src" %}
9152 
9153   ins_encode %{
9154     Register src_reg = as_Register($src$$reg);
9155     Register dst_reg = as_Register($dst$$reg);
9156     __ encode_klass_not_null(dst_reg, src_reg);
9157   %}
9158 
  ins_pipe(ialu_reg);
9160 %}
9161 
9162 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
9163   match(Set dst (DecodeNKlass src));
9164 
9165   ins_cost(INSN_COST * 3);
9166   format %{ "decode_klass_not_null $dst,$src" %}
9167 
9168   ins_encode %{
9169     Register src_reg = as_Register($src$$reg);
9170     Register dst_reg = as_Register($dst$$reg);
9171     if (dst_reg != src_reg) {
9172       __ decode_klass_not_null(dst_reg, src_reg);
9173     } else {
9174       __ decode_klass_not_null(dst_reg);
9175     }
9176   %}
9177 
  ins_pipe(ialu_reg);
9179 %}
9180 
9181 instruct checkCastPP(iRegPNoSp dst)
9182 %{
9183   match(Set dst (CheckCastPP dst));
9184 
9185   size(0);
9186   format %{ "# checkcastPP of $dst" %}
9187   ins_encode(/* empty encoding */);
9188   ins_pipe(pipe_class_empty);
9189 %}
9190 
9191 instruct castPP(iRegPNoSp dst)
9192 %{
9193   match(Set dst (CastPP dst));
9194 
9195   size(0);
9196   format %{ "# castPP of $dst" %}
9197   ins_encode(/* empty encoding */);
9198   ins_pipe(pipe_class_empty);
9199 %}
9200 
9201 instruct castII(iRegI dst)
9202 %{
9203   match(Set dst (CastII dst));
9204 
9205   size(0);
9206   format %{ "# castII of $dst" %}
9207   ins_encode(/* empty encoding */);
9208   ins_cost(0);
9209   ins_pipe(pipe_class_empty);
9210 %}
9211 
9212 // ============================================================================
9213 // Atomic operation instructions
9214 //
9215 // Intel and SPARC both implement Ideal Node LoadPLocked and
9216 // Store{PIL}Conditional instructions using a normal load for the
9217 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9218 //
// The ideal code appears only to use LoadPLocked/StorePConditional
// as a pair to lock object allocations from Eden space when not
// using TLABs.
9222 //
9223 // There does not appear to be a Load{IL}Locked Ideal Node and the
9224 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9225 // and to use StoreIConditional only for 32-bit and StoreLConditional
9226 // only for 64-bit.
9227 //
// We implement LoadPLocked and StorePConditional instructions using,
// respectively, the AArch64 hw load-exclusive and store-conditional
// instructions, whereas we must implement each of
// Store{IL}Conditional using a CAS which employs a pair of
// instructions comprising a load-exclusive followed by a
// store-conditional.
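//
// In outline (a sketch only -- the real code is produced by the
// aarch64_enc_cmpxchg* encoding classes and MacroAssembler helpers,
// and the label and scratch register names here are illustrative)
// such a CAS expands to a load-exclusive/store-conditional retry loop:
//
//   retry:
//     ldxr   rscratch1, [addr]         // load-exclusive current value
//     cmp    rscratch1, oldval         // is the expected value there?
//     b.ne   done                      // no: fail, leaving NE set
//     stlxr  rscratch1, newval, [addr] // attempt the conditional store
//     cbnz   rscratch1, retry          // reservation lost: try again
//   done:
//
// On exit the flags are EQ if the store succeeded and NE if the
// compare failed, which is what the cset in the rules below consumes.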
9234 
9235 
9236 // Locked-load (linked load) of the current heap-top
9237 // used when updating the eden heap top
9238 // implemented using ldaxr on AArch64
9239 
9240 instruct loadPLocked(iRegPNoSp dst, indirect mem)
9241 %{
9242   match(Set dst (LoadPLocked mem));
9243 
9244   ins_cost(VOLATILE_REF_COST);
9245 
9246   format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
9247 
9248   ins_encode(aarch64_enc_ldaxr(dst, mem));
9249 
9250   ins_pipe(pipe_serial);
9251 %}
9252 
9253 // Conditional-store of the updated heap-top.
9254 // Used during allocation of the shared heap.
9255 // Sets flag (EQ) on success.
9256 // implemented using stlxr on AArch64.
9257 
9258 instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
9259 %{
9260   match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
9261 
9262   ins_cost(VOLATILE_REF_COST);
9263 
  // TODO
  // do we need to do a store-conditional release or can we just use a
  // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}
9272 
9273   ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));
9274 
9275   ins_pipe(pipe_serial);
9276 %}
9277 
9278 
9279 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9280 // when attempting to rebias a lock towards the current thread.  We
9281 // must use the acquire form of cmpxchg in order to guarantee acquire
9282 // semantics in this case.
9283 instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
9284 %{
9285   match(Set cr (StoreLConditional mem (Binary oldval newval)));
9286 
9287   ins_cost(VOLATILE_REF_COST);
9288 
  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}
9293 
9294   ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));
9295 
9296   ins_pipe(pipe_slow);
9297 %}
9298 
9299 // storeIConditional also has acquire semantics, for no better reason
9300 // than matching storeLConditional.  At the time of writing this
9301 // comment storeIConditional was not used anywhere by AArch64.
9302 instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
9303 %{
9304   match(Set cr (StoreIConditional mem (Binary oldval newval)));
9305 
9306   ins_cost(VOLATILE_REF_COST);
9307 
  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}
9312 
9313   ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));
9314 
9315   ins_pipe(pipe_slow);
9316 %}
9317 
9318 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9319 // can't match them
9320 
9321 // standard CompareAndSwapX when we are using barriers
9322 // these have higher priority than the rules selected by a predicate
9323 
9324 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
9325 
9326   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
9327   ins_cost(2 * VOLATILE_REF_COST);
9328 
9329   effect(KILL cr);
9330 
  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
9338 
9339   ins_pipe(pipe_slow);
9340 %}
9341 
9342 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
9343 
9344   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
9345   ins_cost(2 * VOLATILE_REF_COST);
9346 
9347   effect(KILL cr);
9348 
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
9356 
9357   ins_pipe(pipe_slow);
9358 %}
9359 
9360 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
9361 
9362   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
9363   ins_cost(2 * VOLATILE_REF_COST);
9364 
9365   effect(KILL cr);
9366 
  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
9374 
9375   ins_pipe(pipe_slow);
9376 %}
9377 
9378 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
9379 
9380   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
9381   ins_cost(2 * VOLATILE_REF_COST);
9382 
9383   effect(KILL cr);
9384 
  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
9392 
9393   ins_pipe(pipe_slow);
9394 %}
9395 
9396 // alternative CompareAndSwapX when we are eliding barriers
9397 
9398 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
9399 
9400   predicate(needs_acquiring_load_exclusive(n));
9401   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
9402   ins_cost(VOLATILE_REF_COST);
9403 
9404   effect(KILL cr);
9405 
  format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
9413 
9414   ins_pipe(pipe_slow);
9415 %}
9416 
9417 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
9418 
9419   predicate(needs_acquiring_load_exclusive(n));
9420   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
9421   ins_cost(VOLATILE_REF_COST);
9422 
9423   effect(KILL cr);
9424 
  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
9432 
9433   ins_pipe(pipe_slow);
9434 %}
9435 
9436 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
9437 
9438   predicate(needs_acquiring_load_exclusive(n));
9439   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
9440   ins_cost(VOLATILE_REF_COST);
9441 
9442   effect(KILL cr);
9443 
  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
9451 
9452   ins_pipe(pipe_slow);
9453 %}
9454 
9455 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
9456 
9457   predicate(needs_acquiring_load_exclusive(n));
9458   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
9459   ins_cost(VOLATILE_REF_COST);
9460 
9461   effect(KILL cr);
9462 
  format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));
9470 
9471   ins_pipe(pipe_slow);
9472 %}
9473 
9474 
9475 instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
9476   match(Set prev (GetAndSetI mem newv));
9477   format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
9478   ins_encode %{
9479     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
9480   %}
9481   ins_pipe(pipe_serial);
9482 %}
9483 
9484 instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
9485   match(Set prev (GetAndSetL mem newv));
9486   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
9487   ins_encode %{
9488     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
9489   %}
9490   ins_pipe(pipe_serial);
9491 %}
9492 
9493 instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
9494   match(Set prev (GetAndSetN mem newv));
9495   format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
9496   ins_encode %{
9497     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
9498   %}
9499   ins_pipe(pipe_serial);
9500 %}
9501 
9502 instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
9503   match(Set prev (GetAndSetP mem newv));
9504   format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
9505   ins_encode %{
9506     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
9507   %}
9508   ins_pipe(pipe_serial);
9509 %}
9510 
9511 
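// The *_no_res variants below are matched when the ideal node's result
// is unused; they pass noreg as the destination so that no register is
// tied up holding the fetched old value.
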
9512 instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
9513   match(Set newval (GetAndAddL mem incr));
9514   ins_cost(INSN_COST * 10);
9515   format %{ "get_and_addL $newval, [$mem], $incr" %}
9516   ins_encode %{
9517     __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
9518   %}
9519   ins_pipe(pipe_serial);
9520 %}
9521 
9522 instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
9523   predicate(n->as_LoadStore()->result_not_used());
9524   match(Set dummy (GetAndAddL mem incr));
9525   ins_cost(INSN_COST * 9);
9526   format %{ "get_and_addL [$mem], $incr" %}
9527   ins_encode %{
9528     __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
9529   %}
9530   ins_pipe(pipe_serial);
9531 %}
9532 
9533 instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
9534   match(Set newval (GetAndAddL mem incr));
9535   ins_cost(INSN_COST * 10);
9536   format %{ "get_and_addL $newval, [$mem], $incr" %}
9537   ins_encode %{
9538     __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
9539   %}
9540   ins_pipe(pipe_serial);
9541 %}
9542 
9543 instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
9544   predicate(n->as_LoadStore()->result_not_used());
9545   match(Set dummy (GetAndAddL mem incr));
9546   ins_cost(INSN_COST * 9);
9547   format %{ "get_and_addL [$mem], $incr" %}
9548   ins_encode %{
9549     __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
9550   %}
9551   ins_pipe(pipe_serial);
9552 %}
9553 
9554 instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
9555   match(Set newval (GetAndAddI mem incr));
9556   ins_cost(INSN_COST * 10);
9557   format %{ "get_and_addI $newval, [$mem], $incr" %}
9558   ins_encode %{
9559     __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
9560   %}
9561   ins_pipe(pipe_serial);
9562 %}
9563 
9564 instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
9565   predicate(n->as_LoadStore()->result_not_used());
9566   match(Set dummy (GetAndAddI mem incr));
9567   ins_cost(INSN_COST * 9);
9568   format %{ "get_and_addI [$mem], $incr" %}
9569   ins_encode %{
9570     __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
9571   %}
9572   ins_pipe(pipe_serial);
9573 %}
9574 
9575 instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
9576   match(Set newval (GetAndAddI mem incr));
9577   ins_cost(INSN_COST * 10);
9578   format %{ "get_and_addI $newval, [$mem], $incr" %}
9579   ins_encode %{
9580     __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
9581   %}
9582   ins_pipe(pipe_serial);
9583 %}
9584 
9585 instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
9586   predicate(n->as_LoadStore()->result_not_used());
9587   match(Set dummy (GetAndAddI mem incr));
9588   ins_cost(INSN_COST * 9);
9589   format %{ "get_and_addI [$mem], $incr" %}
9590   ins_encode %{
9591     __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
9592   %}
9593   ins_pipe(pipe_serial);
9594 %}
9595 
9596 // Manifest a CmpL result in an integer register.
9597 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
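// The three-instruction encoding below works off a single compare:
// csetw writes 1 when the operands differ (NE) and 0 when they are
// equal, and cnegw then negates that value when the comparison was
// signed less-than (LT), yielding -1, 0 or 1 as required.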
9598 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
9599 %{
9600   match(Set dst (CmpL3 src1 src2));
9601   effect(KILL flags);
9602 
9603   ins_cost(INSN_COST * 6);
  format %{
    "cmp $src1, $src2\n\t"
    "csetw $dst, ne\n\t"
    "cnegw $dst, lt"
  %}
9609   // format %{ "CmpL3 $dst, $src1, $src2" %}
9610   ins_encode %{
9611     __ cmp($src1$$Register, $src2$$Register);
9612     __ csetw($dst$$Register, Assembler::NE);
9613     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
9614   %}
9615 
9616   ins_pipe(pipe_class_default);
9617 %}
9618 
9619 instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
9620 %{
9621   match(Set dst (CmpL3 src1 src2));
9622   effect(KILL flags);
9623 
9624   ins_cost(INSN_COST * 6);
  format %{
    "cmp $src1, $src2\n\t"
    "csetw $dst, ne\n\t"
    "cnegw $dst, lt"
  %}
9630   ins_encode %{
9631     int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
9633       __ adds(zr, $src1$$Register, -con);
9634     } else {
9635       __ subs(zr, $src1$$Register, con);
9636     }
9637     __ csetw($dst$$Register, Assembler::NE);
9638     __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
9639   %}
9640 
9641   ins_pipe(pipe_class_default);
9642 %}
9643 
9644 // ============================================================================
9645 // Conditional Move Instructions
9646 
9647 // n.b. we have identical rules for both a signed compare op (cmpOp)
9648 // and an unsigned compare op (cmpOpU). it would be nice if we could
9649 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
9651 // opclass does not live up to the COND_INTER interface of its
9652 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9654 // which throws a ShouldNotHappen. So, we have to provide two flavours
9655 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9656 
9657 instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9658   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9659 
9660   ins_cost(INSN_COST * 2);
9661   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}
9662 
9663   ins_encode %{
9664     __ cselw(as_Register($dst$$reg),
9665              as_Register($src2$$reg),
9666              as_Register($src1$$reg),
9667              (Assembler::Condition)$cmp$$cmpcode);
9668   %}
9669 
9670   ins_pipe(icond_reg_reg);
9671 %}
9672 
9673 instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9674   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9675 
9676   ins_cost(INSN_COST * 2);
9677   format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}
9678 
9679   ins_encode %{
9680     __ cselw(as_Register($dst$$reg),
9681              as_Register($src2$$reg),
9682              as_Register($src1$$reg),
9683              (Assembler::Condition)$cmp$$cmpcode);
9684   %}
9685 
9686   ins_pipe(icond_reg_reg);
9687 %}
9688 
9689 // special cases where one arg is zero
9690 
9691 // n.b. this is selected in preference to the rule above because it
9692 // avoids loading constant 0 into a source register
9693 
9694 // TODO
9695 // we ought only to be able to cull one of these variants as the ideal
9696 // transforms ought always to order the zero consistently (to left/right?)
9697 
9698 instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
9699   match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
9700 
9701   ins_cost(INSN_COST * 2);
9702   format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}
9703 
9704   ins_encode %{
9705     __ cselw(as_Register($dst$$reg),
9706              as_Register($src$$reg),
9707              zr,
9708              (Assembler::Condition)$cmp$$cmpcode);
9709   %}
9710 
9711   ins_pipe(icond_reg);
9712 %}
9713 
9714 instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
9715   match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
9716 
9717   ins_cost(INSN_COST * 2);
9718   format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}
9719 
9720   ins_encode %{
9721     __ cselw(as_Register($dst$$reg),
9722              as_Register($src$$reg),
9723              zr,
9724              (Assembler::Condition)$cmp$$cmpcode);
9725   %}
9726 
9727   ins_pipe(icond_reg);
9728 %}
9729 
9730 instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
9731   match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
9732 
9733   ins_cost(INSN_COST * 2);
9734   format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}
9735 
9736   ins_encode %{
9737     __ cselw(as_Register($dst$$reg),
9738              zr,
9739              as_Register($src$$reg),
9740              (Assembler::Condition)$cmp$$cmpcode);
9741   %}
9742 
9743   ins_pipe(icond_reg);
9744 %}
9745 
9746 instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
9747   match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
9748 
9749   ins_cost(INSN_COST * 2);
9750   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}
9751 
9752   ins_encode %{
9753     __ cselw(as_Register($dst$$reg),
9754              zr,
9755              as_Register($src$$reg),
9756              (Assembler::Condition)$cmp$$cmpcode);
9757   %}
9758 
9759   ins_pipe(icond_reg);
9760 %}
9761 
9762 // special case for creating a boolean 0 or 1
9763 
9764 // n.b. this is selected in preference to the rule above because it
9765 // avoids loading constants 0 and 1 into a source register
9766 
9767 instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
9768   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
9769 
9770   ins_cost(INSN_COST * 2);
9771   format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}
9772 
9773   ins_encode %{
9774     // equivalently
9775     // cset(as_Register($dst$$reg),
9776     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9777     __ csincw(as_Register($dst$$reg),
9778              zr,
9779              zr,
9780              (Assembler::Condition)$cmp$$cmpcode);
9781   %}
9782 
9783   ins_pipe(icond_none);
9784 %}
9785 
9786 instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
9787   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
9788 
9789   ins_cost(INSN_COST * 2);
9790   format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}
9791 
9792   ins_encode %{
9793     // equivalently
9794     // cset(as_Register($dst$$reg),
9795     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9796     __ csincw(as_Register($dst$$reg),
9797              zr,
9798              zr,
9799              (Assembler::Condition)$cmp$$cmpcode);
9800   %}
9801 
9802   ins_pipe(icond_none);
9803 %}
9804 
9805 instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9806   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9807 
9808   ins_cost(INSN_COST * 2);
9809   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}
9810 
9811   ins_encode %{
9812     __ csel(as_Register($dst$$reg),
9813             as_Register($src2$$reg),
9814             as_Register($src1$$reg),
9815             (Assembler::Condition)$cmp$$cmpcode);
9816   %}
9817 
9818   ins_pipe(icond_reg_reg);
9819 %}
9820 
9821 instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9822   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9823 
9824   ins_cost(INSN_COST * 2);
9825   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}
9826 
9827   ins_encode %{
9828     __ csel(as_Register($dst$$reg),
9829             as_Register($src2$$reg),
9830             as_Register($src1$$reg),
9831             (Assembler::Condition)$cmp$$cmpcode);
9832   %}
9833 
9834   ins_pipe(icond_reg_reg);
9835 %}
9836 
9837 // special cases where one arg is zero
9838 
9839 instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9840   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9841 
9842   ins_cost(INSN_COST * 2);
9843   format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}
9844 
9845   ins_encode %{
9846     __ csel(as_Register($dst$$reg),
9847             zr,
9848             as_Register($src$$reg),
9849             (Assembler::Condition)$cmp$$cmpcode);
9850   %}
9851 
9852   ins_pipe(icond_reg);
9853 %}
9854 
9855 instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9856   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9857 
9858   ins_cost(INSN_COST * 2);
9859   format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}
9860 
9861   ins_encode %{
9862     __ csel(as_Register($dst$$reg),
9863             zr,
9864             as_Register($src$$reg),
9865             (Assembler::Condition)$cmp$$cmpcode);
9866   %}
9867 
9868   ins_pipe(icond_reg);
9869 %}
9870 
9871 instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9872   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9873 
9874   ins_cost(INSN_COST * 2);
9875   format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}
9876 
9877   ins_encode %{
9878     __ csel(as_Register($dst$$reg),
9879             as_Register($src$$reg),
9880             zr,
9881             (Assembler::Condition)$cmp$$cmpcode);
9882   %}
9883 
9884   ins_pipe(icond_reg);
9885 %}
9886 
9887 instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9888   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9889 
9890   ins_cost(INSN_COST * 2);
9891   format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}
9892 
9893   ins_encode %{
9894     __ csel(as_Register($dst$$reg),
9895             as_Register($src$$reg),
9896             zr,
9897             (Assembler::Condition)$cmp$$cmpcode);
9898   %}
9899 
9900   ins_pipe(icond_reg);
9901 %}
9902 
9903 instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9904   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9905 
9906   ins_cost(INSN_COST * 2);
9907   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}
9908 
9909   ins_encode %{
9910     __ csel(as_Register($dst$$reg),
9911             as_Register($src2$$reg),
9912             as_Register($src1$$reg),
9913             (Assembler::Condition)$cmp$$cmpcode);
9914   %}
9915 
9916   ins_pipe(icond_reg_reg);
9917 %}
9918 
9919 instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9920   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9921 
9922   ins_cost(INSN_COST * 2);
9923   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}
9924 
9925   ins_encode %{
9926     __ csel(as_Register($dst$$reg),
9927             as_Register($src2$$reg),
9928             as_Register($src1$$reg),
9929             (Assembler::Condition)$cmp$$cmpcode);
9930   %}
9931 
9932   ins_pipe(icond_reg_reg);
9933 %}
9934 
9935 // special cases where one arg is zero
9936 
9937 instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
9938   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
9939 
9940   ins_cost(INSN_COST * 2);
9941   format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}
9942 
9943   ins_encode %{
9944     __ csel(as_Register($dst$$reg),
9945             zr,
9946             as_Register($src$$reg),
9947             (Assembler::Condition)$cmp$$cmpcode);
9948   %}
9949 
9950   ins_pipe(icond_reg);
9951 %}
9952 
9953 instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
9954   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
9955 
9956   ins_cost(INSN_COST * 2);
9957   format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}
9958 
9959   ins_encode %{
9960     __ csel(as_Register($dst$$reg),
9961             zr,
9962             as_Register($src$$reg),
9963             (Assembler::Condition)$cmp$$cmpcode);
9964   %}
9965 
9966   ins_pipe(icond_reg);
9967 %}
9968 
9969 instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
9970   match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
9971 
9972   ins_cost(INSN_COST * 2);
9973   format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}
9974 
9975   ins_encode %{
9976     __ csel(as_Register($dst$$reg),
9977             as_Register($src$$reg),
9978             zr,
9979             (Assembler::Condition)$cmp$$cmpcode);
9980   %}
9981 
9982   ins_pipe(icond_reg);
9983 %}
9984 
9985 instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
9986   match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
9987 
9988   ins_cost(INSN_COST * 2);
9989   format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}
9990 
9991   ins_encode %{
9992     __ csel(as_Register($dst$$reg),
9993             as_Register($src$$reg),
9994             zr,
9995             (Assembler::Condition)$cmp$$cmpcode);
9996   %}
9997 
9998   ins_pipe(icond_reg);
9999 %}
10000 
10001 instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
10002   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
10003 
10004   ins_cost(INSN_COST * 2);
10005   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
10006 
10007   ins_encode %{
10008     __ cselw(as_Register($dst$$reg),
10009              as_Register($src2$$reg),
10010              as_Register($src1$$reg),
10011              (Assembler::Condition)$cmp$$cmpcode);
10012   %}
10013 
10014   ins_pipe(icond_reg_reg);
10015 %}
10016 
10017 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
10018   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
10019 
10020   ins_cost(INSN_COST * 2);
10021   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
10022 
10023   ins_encode %{
10024     __ cselw(as_Register($dst$$reg),
10025              as_Register($src2$$reg),
10026              as_Register($src1$$reg),
10027              (Assembler::Condition)$cmp$$cmpcode);
10028   %}
10029 
10030   ins_pipe(icond_reg_reg);
10031 %}
10032 
10033 // special cases where one arg is zero
10034 
10035 instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
10036   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
10037 
10038   ins_cost(INSN_COST * 2);
10039   format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}
10040 
10041   ins_encode %{
10042     __ cselw(as_Register($dst$$reg),
10043              zr,
10044              as_Register($src$$reg),
10045              (Assembler::Condition)$cmp$$cmpcode);
10046   %}
10047 
10048   ins_pipe(icond_reg);
10049 %}
10050 
10051 instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
10052   match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
10053 
10054   ins_cost(INSN_COST * 2);
10055   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}
10056 
10057   ins_encode %{
10058     __ cselw(as_Register($dst$$reg),
10059              zr,
10060              as_Register($src$$reg),
10061              (Assembler::Condition)$cmp$$cmpcode);
10062   %}
10063 
10064   ins_pipe(icond_reg);
10065 %}
10066 
10067 instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
10068   match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
10069 
10070   ins_cost(INSN_COST * 2);
10071   format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}
10072 
10073   ins_encode %{
10074     __ cselw(as_Register($dst$$reg),
10075              as_Register($src$$reg),
10076              zr,
10077              (Assembler::Condition)$cmp$$cmpcode);
10078   %}
10079 
10080   ins_pipe(icond_reg);
10081 %}
10082 
10083 instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
10084   match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
10085 
10086   ins_cost(INSN_COST * 2);
10087   format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}
10088 
10089   ins_encode %{
10090     __ cselw(as_Register($dst$$reg),
10091              as_Register($src$$reg),
10092              zr,
10093              (Assembler::Condition)$cmp$$cmpcode);
10094   %}
10095 
10096   ins_pipe(icond_reg);
10097 %}
10098 
10099 instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
10100 %{
10101   match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
10102 
10103   ins_cost(INSN_COST * 3);
10104 
10105   format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
10106   ins_encode %{
10107     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
10108     __ fcsels(as_FloatRegister($dst$$reg),
10109               as_FloatRegister($src2$$reg),
10110               as_FloatRegister($src1$$reg),
10111               cond);
10112   %}
10113 
10114   ins_pipe(fp_cond_reg_reg_s);
10115 %}
10116 
10117 instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
10118 %{
10119   match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
10120 
10121   ins_cost(INSN_COST * 3);
10122 
10123   format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
10124   ins_encode %{
10125     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
10126     __ fcsels(as_FloatRegister($dst$$reg),
10127               as_FloatRegister($src2$$reg),
10128               as_FloatRegister($src1$$reg),
10129               cond);
10130   %}
10131 
10132   ins_pipe(fp_cond_reg_reg_s);
10133 %}
10134 
10135 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
10136 %{
10137   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
10138 
10139   ins_cost(INSN_COST * 3);
10140 
10141   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
10142   ins_encode %{
10143     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
10144     __ fcseld(as_FloatRegister($dst$$reg),
10145               as_FloatRegister($src2$$reg),
10146               as_FloatRegister($src1$$reg),
10147               cond);
10148   %}
10149 
10150   ins_pipe(fp_cond_reg_reg_d);
10151 %}
10152 
10153 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
10154 %{
10155   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
10156 
10157   ins_cost(INSN_COST * 3);
10158 
10159   format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
10160   ins_encode %{
10161     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
10162     __ fcseld(as_FloatRegister($dst$$reg),
10163               as_FloatRegister($src2$$reg),
10164               as_FloatRegister($src1$$reg),
10165               cond);
10166   %}
10167 
10168   ins_pipe(fp_cond_reg_reg_d);
10169 %}
10170 
10171 // ============================================================================
10172 // Arithmetic Instructions
10173 //
10174 
10175 // Integer Addition
10176 
10177 // TODO
10178 // these currently employ operations which do not set CR and hence are
10179 // not flagged as killing CR but we would like to isolate the cases
10180 // where we want to set flags from those where we don't. need to work
10181 // out how to do that.
10182 
10183 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10184   match(Set dst (AddI src1 src2));
10185 
10186   ins_cost(INSN_COST);
10187   format %{ "addw  $dst, $src1, $src2" %}
10188 
10189   ins_encode %{
10190     __ addw(as_Register($dst$$reg),
10191             as_Register($src1$$reg),
10192             as_Register($src2$$reg));
10193   %}
10194 
10195   ins_pipe(ialu_reg_reg);
10196 %}
10197 
10198 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
10199   match(Set dst (AddI src1 src2));
10200 
10201   ins_cost(INSN_COST);
10202   format %{ "addw $dst, $src1, $src2" %}
10203 
10204   // use opcode to indicate that this is an add not a sub
10205   opcode(0x0);
10206 
10207   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10208 
10209   ins_pipe(ialu_reg_imm);
10210 %}
10211 
10212 instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
10213   match(Set dst (AddI (ConvL2I src1) src2));
10214 
10215   ins_cost(INSN_COST);
10216   format %{ "addw $dst, $src1, $src2" %}
10217 
10218   // use opcode to indicate that this is an add not a sub
10219   opcode(0x0);
10220 
10221   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10222 
10223   ins_pipe(ialu_reg_imm);
10224 %}
10225 
10226 // Pointer Addition
10227 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
10228   match(Set dst (AddP src1 src2));
10229 
10230   ins_cost(INSN_COST);
10231   format %{ "add $dst, $src1, $src2\t# ptr" %}
10232 
10233   ins_encode %{
10234     __ add(as_Register($dst$$reg),
10235            as_Register($src1$$reg),
10236            as_Register($src2$$reg));
10237   %}
10238 
10239   ins_pipe(ialu_reg_reg);
10240 %}
10241 
10242 instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
10243   match(Set dst (AddP src1 (ConvI2L src2)));
10244 
10245   ins_cost(1.9 * INSN_COST);
10246   format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
10247 
10248   ins_encode %{
10249     __ add(as_Register($dst$$reg),
10250            as_Register($src1$$reg),
10251            as_Register($src2$$reg), ext::sxtw);
10252   %}
10253 
10254   ins_pipe(ialu_reg_reg);
10255 %}
10256 
10257 instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
10258   match(Set dst (AddP src1 (LShiftL src2 scale)));
10259 
10260   ins_cost(1.9 * INSN_COST);
10261   format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}
10262 
10263   ins_encode %{
10264     __ lea(as_Register($dst$$reg),
10265            Address(as_Register($src1$$reg), as_Register($src2$$reg),
10266                    Address::lsl($scale$$constant)));
10267   %}
10268 
10269   ins_pipe(ialu_reg_reg_shift);
10270 %}
10271 
10272 instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
10273   match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));
10274 
10275   ins_cost(1.9 * INSN_COST);
10276   format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}
10277 
10278   ins_encode %{
10279     __ lea(as_Register($dst$$reg),
10280            Address(as_Register($src1$$reg), as_Register($src2$$reg),
10281                    Address::sxtw($scale$$constant)));
10282   %}
10283 
10284   ins_pipe(ialu_reg_reg_shift);
10285 %}
10286 
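// A LShiftL of a ConvI2L can be done with a single sbfiz: the shift
// count becomes the lsb position and the field width is the number of
// significant source bits, capped at 32 since only the low 32 bits of
// an int input carry information.  For example, ((long)x) << 3 becomes
// sbfiz dst, src, #3, #32.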
10287 instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
10288   match(Set dst (LShiftL (ConvI2L src) scale));
10289 
10290   ins_cost(INSN_COST);
10291   format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}
10292 
10293   ins_encode %{
10294     __ sbfiz(as_Register($dst$$reg),
10295           as_Register($src$$reg),
10296           $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
10297   %}
10298 
10299   ins_pipe(ialu_reg_shift);
10300 %}
10301 
10302 // Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand so that the matcher prefers to fold the offset into an
// addressing mode instead
10305 instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
10306   match(Set dst (AddP src1 src2));
10307 
10308   ins_cost(INSN_COST);
10309   format %{ "add $dst, $src1, $src2\t# ptr" %}
10310 
10311   // use opcode to indicate that this is an add not a sub
10312   opcode(0x0);
10313 
10314   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
10315 
10316   ins_pipe(ialu_reg_imm);
10317 %}
10318 
10319 // Long Addition
10320 instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10321 
10322   match(Set dst (AddL src1 src2));
10323 
10324   ins_cost(INSN_COST);
10325   format %{ "add  $dst, $src1, $src2" %}
10326 
10327   ins_encode %{
10328     __ add(as_Register($dst$$reg),
10329            as_Register($src1$$reg),
10330            as_Register($src2$$reg));
10331   %}
10332 
10333   ins_pipe(ialu_reg_reg);
10334 %}
10335 
// Long Immediate Addition (no constant pool entries required).
10337 instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
10338   match(Set dst (AddL src1 src2));
10339 
10340   ins_cost(INSN_COST);
10341   format %{ "add $dst, $src1, $src2" %}
10342 
10343   // use opcode to indicate that this is an add not a sub
10344   opcode(0x0);
10345 
10346   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
10347 
10348   ins_pipe(ialu_reg_imm);
10349 %}
10350 
10351 // Integer Subtraction
10352 instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10353   match(Set dst (SubI src1 src2));
10354 
10355   ins_cost(INSN_COST);
10356   format %{ "subw  $dst, $src1, $src2" %}
10357 
10358   ins_encode %{
10359     __ subw(as_Register($dst$$reg),
10360             as_Register($src1$$reg),
10361             as_Register($src2$$reg));
10362   %}
10363 
10364   ins_pipe(ialu_reg_reg);
10365 %}
10366 
10367 // Immediate Subtraction
10368 instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
10369   match(Set dst (SubI src1 src2));
10370 
10371   ins_cost(INSN_COST);
10372   format %{ "subw $dst, $src1, $src2" %}
10373 
10374   // use opcode to indicate that this is a sub not an add
10375   opcode(0x1);
10376 
10377   ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
10378 
10379   ins_pipe(ialu_reg_imm);
10380 %}
10381 
10382 // Long Subtraction
10383 instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10384 
10385   match(Set dst (SubL src1 src2));
10386 
10387   ins_cost(INSN_COST);
10388   format %{ "sub  $dst, $src1, $src2" %}
10389 
10390   ins_encode %{
10391     __ sub(as_Register($dst$$reg),
10392            as_Register($src1$$reg),
10393            as_Register($src2$$reg));
10394   %}
10395 
10396   ins_pipe(ialu_reg_reg);
10397 %}
10398 
// Long Immediate Subtraction (no constant pool entries required).
10400 instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
10401   match(Set dst (SubL src1 src2));
10402 
10403   ins_cost(INSN_COST);
10404   format %{ "sub$dst, $src1, $src2" %}
10405 
10406   // use opcode to indicate that this is a sub not an add
10407   opcode(0x1);
10408 
10409   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
10410 
10411   ins_pipe(ialu_reg_imm);
10412 %}
10413 
10414 // Integer Negation (special case for sub)
10415 
10416 instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
10417   match(Set dst (SubI zero src));
10418 
10419   ins_cost(INSN_COST);
10420   format %{ "negw $dst, $src\t# int" %}
10421 
10422   ins_encode %{
10423     __ negw(as_Register($dst$$reg),
10424             as_Register($src$$reg));
10425   %}
10426 
10427   ins_pipe(ialu_reg);
10428 %}
10429 
10430 // Long Negation
10431 
10432 instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
10433   match(Set dst (SubL zero src));
10434 
10435   ins_cost(INSN_COST);
10436   format %{ "neg $dst, $src\t# long" %}
10437 
10438   ins_encode %{
10439     __ neg(as_Register($dst$$reg),
10440            as_Register($src$$reg));
10441   %}
10442 
10443   ins_pipe(ialu_reg);
10444 %}
10445 
10446 // Integer Multiply
10447 
10448 instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10449   match(Set dst (MulI src1 src2));
10450 
10451   ins_cost(INSN_COST * 3);
10452   format %{ "mulw  $dst, $src1, $src2" %}
10453 
10454   ins_encode %{
10455     __ mulw(as_Register($dst$$reg),
10456             as_Register($src1$$reg),
10457             as_Register($src2$$reg));
10458   %}
10459 
10460   ins_pipe(imul_reg_reg);
10461 %}
10462 
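// A 32x32->64 bit signed multiply: smull reads the W views of its
// sources and writes the full 64-bit product, so the ConvI2L operands
// matched here need no explicit widening instructions.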
10463 instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10464   match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));
10465 
10466   ins_cost(INSN_COST * 3);
10467   format %{ "smull  $dst, $src1, $src2" %}
10468 
10469   ins_encode %{
10470     __ smull(as_Register($dst$$reg),
10471              as_Register($src1$$reg),
10472              as_Register($src2$$reg));
10473   %}
10474 
10475   ins_pipe(imul_reg_reg);
10476 %}
10477 
10478 // Long Multiply
10479 
10480 instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10481   match(Set dst (MulL src1 src2));
10482 
10483   ins_cost(INSN_COST * 5);
10484   format %{ "mul  $dst, $src1, $src2" %}
10485 
10486   ins_encode %{
10487     __ mul(as_Register($dst$$reg),
10488            as_Register($src1$$reg),
10489            as_Register($src2$$reg));
10490   %}
10491 
10492   ins_pipe(lmul_reg_reg);
10493 %}
10494 
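// MulHiL asks for the high 64 bits of the 128-bit signed product;
// smulh computes exactly that in a single instruction.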
10495 instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
10496 %{
10497   match(Set dst (MulHiL src1 src2));
10498 
10499   ins_cost(INSN_COST * 7);
10500   format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}
10501 
10502   ins_encode %{
10503     __ smulh(as_Register($dst$$reg),
10504              as_Register($src1$$reg),
10505              as_Register($src2$$reg));
10506   %}
10507 
10508   ins_pipe(lmul_reg_reg);
10509 %}
10510 
10511 // Combined Integer Multiply & Add/Sub
10512 
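// These fuse a multiply with the following add/sub: for example,
// AddI src3 (MulI src1 src2) becomes a single maddw computing
// src3 + src1 * src2 rather than a mulw followed by an addw.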
10513 instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
10514   match(Set dst (AddI src3 (MulI src1 src2)));
10515 
10516   ins_cost(INSN_COST * 3);
10517   format %{ "madd  $dst, $src1, $src2, $src3" %}
10518 
10519   ins_encode %{
10520     __ maddw(as_Register($dst$$reg),
10521              as_Register($src1$$reg),
10522              as_Register($src2$$reg),
10523              as_Register($src3$$reg));
10524   %}
10525 
10526   ins_pipe(imac_reg_reg);
10527 %}
10528 
10529 instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
10530   match(Set dst (SubI src3 (MulI src1 src2)));
10531 
10532   ins_cost(INSN_COST * 3);
10533   format %{ "msub  $dst, $src1, $src2, $src3" %}
10534 
10535   ins_encode %{
10536     __ msubw(as_Register($dst$$reg),
10537              as_Register($src1$$reg),
10538              as_Register($src2$$reg),
10539              as_Register($src3$$reg));
10540   %}
10541 
10542   ins_pipe(imac_reg_reg);
10543 %}
10544 
10545 // Combined Long Multiply & Add/Sub
10546 
10547 instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
10548   match(Set dst (AddL src3 (MulL src1 src2)));
10549 
10550   ins_cost(INSN_COST * 5);
10551   format %{ "madd  $dst, $src1, $src2, $src3" %}
10552 
10553   ins_encode %{
10554     __ madd(as_Register($dst$$reg),
10555             as_Register($src1$$reg),
10556             as_Register($src2$$reg),
10557             as_Register($src3$$reg));
10558   %}
10559 
10560   ins_pipe(lmac_reg_reg);
10561 %}
10562 
10563 instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
10564   match(Set dst (SubL src3 (MulL src1 src2)));
10565 
10566   ins_cost(INSN_COST * 5);
10567   format %{ "msub  $dst, $src1, $src2, $src3" %}
10568 
10569   ins_encode %{
10570     __ msub(as_Register($dst$$reg),
10571             as_Register($src1$$reg),
10572             as_Register($src2$$reg),
10573             as_Register($src3$$reg));
10574   %}
10575 
10576   ins_pipe(lmac_reg_reg);
10577 %}
10578 
10579 // Integer Divide
10580 
10581 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10582   match(Set dst (DivI src1 src2));
10583 
10584   ins_cost(INSN_COST * 19);
10585   format %{ "sdivw  $dst, $src1, $src2" %}
10586 
10587   ins_encode(aarch64_enc_divw(dst, src1, src2));
10588   ins_pipe(idiv_reg_reg);
10589 %}
10590 
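// (src >> 31) >>> 31 just extracts the sign bit, so a single logical
// shift right by 31 suffices.  This shows up in the rounding sequence
// the compiler emits for signed division by a power of two.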
10591 instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
10592   match(Set dst (URShiftI (RShiftI src1 div1) div2));
10593   ins_cost(INSN_COST);
10594   format %{ "lsrw $dst, $src1, $div1" %}
10595   ins_encode %{
10596     __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
10597   %}
10598   ins_pipe(ialu_reg_shift);
10599 %}
10600 
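// Rounds a signed int toward zero before an arithmetic-shift divide
// by two: adding the sign bit (src >>> 31) biases negative values up
// by one, and a single addw with an LSR #31 shifted operand does it.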
10601 instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
10602   match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
10603   ins_cost(INSN_COST);
10604   format %{ "addw $dst, $src, LSR $div1" %}
10605 
10606   ins_encode %{
10607     __ addw(as_Register($dst$$reg),
10608               as_Register($src$$reg),
10609               as_Register($src$$reg),
10610               Assembler::LSR, 31);
10611   %}
10612   ins_pipe(ialu_reg);
10613 %}
10614 
10615 // Long Divide
10616 
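// divL, signExtractL and div2RoundL below mirror the 32-bit patterns
// above, using the 64-bit forms of the instructions and a shift count
// of 63 in place of 31.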
10617 instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10618   match(Set dst (DivL src1 src2));
10619 
10620   ins_cost(INSN_COST * 35);
10621   format %{ "sdiv   $dst, $src1, $src2" %}
10622 
10623   ins_encode(aarch64_enc_div(dst, src1, src2));
10624   ins_pipe(ldiv_reg_reg);
10625 %}
10626 
10627 instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
10628   match(Set dst (URShiftL (RShiftL src1 div1) div2));
10629   ins_cost(INSN_COST);
10630   format %{ "lsr $dst, $src1, $div1" %}
10631   ins_encode %{
10632     __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
10633   %}
10634   ins_pipe(ialu_reg_shift);
10635 %}
10636 
10637 instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
10638   match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
10639   ins_cost(INSN_COST);
10640   format %{ "add $dst, $src, $div1" %}
10641 
10642   ins_encode %{
10643     __ add(as_Register($dst$$reg),
10644               as_Register($src$$reg),
10645               as_Register($src$$reg),
10646               Assembler::LSR, 63);
10647   %}
10648   ins_pipe(ialu_reg);
10649 %}
10650 
10651 // Integer Remainder
10652 
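// AArch64 has no integer remainder instruction, so ModI is computed
// as src1 - (src1 / src2) * src2 using an sdivw followed by an msubw;
// hence the higher cost relative to DivI.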
10653 instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10654   match(Set dst (ModI src1 src2));
10655 
10656   ins_cost(INSN_COST * 22);
10657   format %{ "sdivw  rscratch1, $src1, $src2\n\t"
10658             "msubw($dst, rscratch1, $src2, $src1" %}
10659 
10660   ins_encode(aarch64_enc_modw(dst, src1, src2));
10661   ins_pipe(idiv_reg_reg);
10662 %}
10663 
10664 // Long Remainder
10665 
10666 instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
10667   match(Set dst (ModL src1 src2));
10668 
10669   ins_cost(INSN_COST * 38);
10670   format %{ "sdiv   rscratch1, $src1, $src2\n"
10671             "msub($dst, rscratch1, $src2, $src1" %}
10672 
10673   ins_encode(aarch64_enc_mod(dst, src1, src2));
10674   ins_pipe(ldiv_reg_reg);
10675 %}
10676 
10677 // Integer Shifts
10678 
10679 // Shift Left Register
10680 instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10681   match(Set dst (LShiftI src1 src2));
10682 
10683   ins_cost(INSN_COST * 2);
10684   format %{ "lslvw  $dst, $src1, $src2" %}
10685 
10686   ins_encode %{
10687     __ lslvw(as_Register($dst$$reg),
10688              as_Register($src1$$reg),
10689              as_Register($src2$$reg));
10690   %}
10691 
10692   ins_pipe(ialu_reg_reg_vshift);
10693 %}
10694 
10695 // Shift Left Immediate
10696 instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10697   match(Set dst (LShiftI src1 src2));
10698 
10699   ins_cost(INSN_COST);
10700   format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}
10701 
10702   ins_encode %{
10703     __ lslw(as_Register($dst$$reg),
10704             as_Register($src1$$reg),
10705             $src2$$constant & 0x1f);
10706   %}
10707 
10708   ins_pipe(ialu_reg_shift);
10709 %}
10710 
10711 // Shift Right Logical Register
10712 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10713   match(Set dst (URShiftI src1 src2));
10714 
10715   ins_cost(INSN_COST * 2);
10716   format %{ "lsrvw  $dst, $src1, $src2" %}
10717 
10718   ins_encode %{
10719     __ lsrvw(as_Register($dst$$reg),
10720              as_Register($src1$$reg),
10721              as_Register($src2$$reg));
10722   %}
10723 
10724   ins_pipe(ialu_reg_reg_vshift);
10725 %}
10726 
10727 // Shift Right Logical Immediate
10728 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10729   match(Set dst (URShiftI src1 src2));
10730 
10731   ins_cost(INSN_COST);
10732   format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}
10733 
10734   ins_encode %{
10735     __ lsrw(as_Register($dst$$reg),
10736             as_Register($src1$$reg),
10737             $src2$$constant & 0x1f);
10738   %}
10739 
10740   ins_pipe(ialu_reg_shift);
10741 %}
10742 
10743 // Shift Right Arithmetic Register
10744 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10745   match(Set dst (RShiftI src1 src2));
10746 
10747   ins_cost(INSN_COST * 2);
10748   format %{ "asrvw  $dst, $src1, $src2" %}
10749 
10750   ins_encode %{
10751     __ asrvw(as_Register($dst$$reg),
10752              as_Register($src1$$reg),
10753              as_Register($src2$$reg));
10754   %}
10755 
10756   ins_pipe(ialu_reg_reg_vshift);
10757 %}
10758 
10759 // Shift Right Arithmetic Immediate
10760 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10761   match(Set dst (RShiftI src1 src2));
10762 
10763   ins_cost(INSN_COST);
10764   format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
10765 
10766   ins_encode %{
10767     __ asrw(as_Register($dst$$reg),
10768             as_Register($src1$$reg),
10769             $src2$$constant & 0x1f);
10770   %}
10771 
10772   ins_pipe(ialu_reg_shift);
10773 %}
10774 
10775 // Combined Int Mask and Right Shift (using UBFM)
10776 // TODO
10777 
10778 // Long Shifts
10779 
10780 // Shift Left Register
10781 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10782   match(Set dst (LShiftL src1 src2));
10783 
10784   ins_cost(INSN_COST * 2);
10785   format %{ "lslv  $dst, $src1, $src2" %}
10786 
10787   ins_encode %{
10788     __ lslv(as_Register($dst$$reg),
10789             as_Register($src1$$reg),
10790             as_Register($src2$$reg));
10791   %}
10792 
10793   ins_pipe(ialu_reg_reg_vshift);
10794 %}
10795 
10796 // Shift Left Immediate
10797 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10798   match(Set dst (LShiftL src1 src2));
10799 
10800   ins_cost(INSN_COST);
10801   format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}
10802 
10803   ins_encode %{
10804     __ lsl(as_Register($dst$$reg),
10805             as_Register($src1$$reg),
10806             $src2$$constant & 0x3f);
10807   %}
10808 
10809   ins_pipe(ialu_reg_shift);
10810 %}
10811 
10812 // Shift Right Logical Register
10813 instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10814   match(Set dst (URShiftL src1 src2));
10815 
10816   ins_cost(INSN_COST * 2);
10817   format %{ "lsrv  $dst, $src1, $src2" %}
10818 
10819   ins_encode %{
10820     __ lsrv(as_Register($dst$$reg),
10821             as_Register($src1$$reg),
10822             as_Register($src2$$reg));
10823   %}
10824 
10825   ins_pipe(ialu_reg_reg_vshift);
10826 %}
10827 
10828 // Shift Right Logical Immediate
10829 instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10830   match(Set dst (URShiftL src1 src2));
10831 
10832   ins_cost(INSN_COST);
10833   format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}
10834 
10835   ins_encode %{
10836     __ lsr(as_Register($dst$$reg),
10837            as_Register($src1$$reg),
10838            $src2$$constant & 0x3f);
10839   %}
10840 
10841   ins_pipe(ialu_reg_shift);
10842 %}
10843 
10844 // A special-case pattern for card table stores.
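// Card table code shifts the object address right by the card size
// shift (typically 9, for 512-byte cards) to index the card byte map;
// the CastP2X exposes the pointer bits as a long so the plain lsr
// pattern below can match it.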
10845 instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
10846   match(Set dst (URShiftL (CastP2X src1) src2));
10847 
10848   ins_cost(INSN_COST);
10849   format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}
10850 
10851   ins_encode %{
10852     __ lsr(as_Register($dst$$reg),
10853            as_Register($src1$$reg),
10854            $src2$$constant & 0x3f);
10855   %}
10856 
10857   ins_pipe(ialu_reg_shift);
10858 %}
10859 
10860 // Shift Right Arithmetic Register
10861 instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
10862   match(Set dst (RShiftL src1 src2));
10863 
10864   ins_cost(INSN_COST * 2);
10865   format %{ "asrv  $dst, $src1, $src2" %}
10866 
10867   ins_encode %{
10868     __ asrv(as_Register($dst$$reg),
10869             as_Register($src1$$reg),
10870             as_Register($src2$$reg));
10871   %}
10872 
10873   ins_pipe(ialu_reg_reg_vshift);
10874 %}
10875 
10876 // Shift Right Arithmetic Immediate
10877 instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
10878   match(Set dst (RShiftL src1 src2));
10879 
10880   ins_cost(INSN_COST);
10881   format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}
10882 
10883   ins_encode %{
10884     __ asr(as_Register($dst$$reg),
10885            as_Register($src1$$reg),
10886            $src2$$constant & 0x3f);
10887   %}
10888 
10889   ins_pipe(ialu_reg_shift);
10890 %}
10891 
10892 // BEGIN This section of the file is automatically generated. Do not edit --------------
10893 
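// The patterns in this section fold a bitwise NOT (modelled in the
// ideal graph as XorI/XorL with -1) into the combining instruction:
// eon/eonw for xor-not, bic/bicw for and-not and orn/ornw for or-not.
// For example, AndL src1 (XorL src2 m1) computes src1 & ~src2 and is
// emitted as a single bic.  The shifted variants further fold a
// constant shift of the second operand into the same instruction.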
10894 instruct regL_not_reg(iRegLNoSp dst,
10895                          iRegL src1, immL_M1 m1,
10896                          rFlagsReg cr) %{
10897   match(Set dst (XorL src1 m1));
10898   ins_cost(INSN_COST);
10899   format %{ "eon  $dst, $src1, zr" %}
10900 
10901   ins_encode %{
10902     __ eon(as_Register($dst$$reg),
10903               as_Register($src1$$reg),
10904               zr,
10905               Assembler::LSL, 0);
10906   %}
10907 
10908   ins_pipe(ialu_reg);
10909 %}
10910 instruct regI_not_reg(iRegINoSp dst,
10911                          iRegIorL2I src1, immI_M1 m1,
10912                          rFlagsReg cr) %{
10913   match(Set dst (XorI src1 m1));
10914   ins_cost(INSN_COST);
10915   format %{ "eonw  $dst, $src1, zr" %}
10916 
10917   ins_encode %{
10918     __ eonw(as_Register($dst$$reg),
10919               as_Register($src1$$reg),
10920               zr,
10921               Assembler::LSL, 0);
10922   %}
10923 
10924   ins_pipe(ialu_reg);
10925 %}
10926 
10927 instruct AndI_reg_not_reg(iRegINoSp dst,
10928                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10929                          rFlagsReg cr) %{
10930   match(Set dst (AndI src1 (XorI src2 m1)));
10931   ins_cost(INSN_COST);
10932   format %{ "bicw  $dst, $src1, $src2" %}
10933 
10934   ins_encode %{
10935     __ bicw(as_Register($dst$$reg),
10936               as_Register($src1$$reg),
10937               as_Register($src2$$reg),
10938               Assembler::LSL, 0);
10939   %}
10940 
10941   ins_pipe(ialu_reg_reg);
10942 %}
10943 
10944 instruct AndL_reg_not_reg(iRegLNoSp dst,
10945                          iRegL src1, iRegL src2, immL_M1 m1,
10946                          rFlagsReg cr) %{
10947   match(Set dst (AndL src1 (XorL src2 m1)));
10948   ins_cost(INSN_COST);
10949   format %{ "bic  $dst, $src1, $src2" %}
10950 
10951   ins_encode %{
10952     __ bic(as_Register($dst$$reg),
10953               as_Register($src1$$reg),
10954               as_Register($src2$$reg),
10955               Assembler::LSL, 0);
10956   %}
10957 
10958   ins_pipe(ialu_reg_reg);
10959 %}
10960 
10961 instruct OrI_reg_not_reg(iRegINoSp dst,
10962                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10963                          rFlagsReg cr) %{
10964   match(Set dst (OrI src1 (XorI src2 m1)));
10965   ins_cost(INSN_COST);
10966   format %{ "ornw  $dst, $src1, $src2" %}
10967 
10968   ins_encode %{
10969     __ ornw(as_Register($dst$$reg),
10970               as_Register($src1$$reg),
10971               as_Register($src2$$reg),
10972               Assembler::LSL, 0);
10973   %}
10974 
10975   ins_pipe(ialu_reg_reg);
10976 %}
10977 
10978 instruct OrL_reg_not_reg(iRegLNoSp dst,
10979                          iRegL src1, iRegL src2, immL_M1 m1,
10980                          rFlagsReg cr) %{
10981   match(Set dst (OrL src1 (XorL src2 m1)));
10982   ins_cost(INSN_COST);
10983   format %{ "orn  $dst, $src1, $src2" %}
10984 
10985   ins_encode %{
10986     __ orn(as_Register($dst$$reg),
10987               as_Register($src1$$reg),
10988               as_Register($src2$$reg),
10989               Assembler::LSL, 0);
10990   %}
10991 
10992   ins_pipe(ialu_reg_reg);
10993 %}
10994 
10995 instruct XorI_reg_not_reg(iRegINoSp dst,
10996                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10997                          rFlagsReg cr) %{
10998   match(Set dst (XorI m1 (XorI src2 src1)));
10999   ins_cost(INSN_COST);
11000   format %{ "eonw  $dst, $src1, $src2" %}
11001 
11002   ins_encode %{
11003     __ eonw(as_Register($dst$$reg),
11004               as_Register($src1$$reg),
11005               as_Register($src2$$reg),
11006               Assembler::LSL, 0);
11007   %}
11008 
11009   ins_pipe(ialu_reg_reg);
11010 %}
11011 
11012 instruct XorL_reg_not_reg(iRegLNoSp dst,
11013                          iRegL src1, iRegL src2, immL_M1 m1,
11014                          rFlagsReg cr) %{
11015   match(Set dst (XorL m1 (XorL src2 src1)));
11016   ins_cost(INSN_COST);
11017   format %{ "eon  $dst, $src1, $src2" %}
11018 
11019   ins_encode %{
11020     __ eon(as_Register($dst$$reg),
11021               as_Register($src1$$reg),
11022               as_Register($src2$$reg),
11023               Assembler::LSL, 0);
11024   %}
11025 
11026   ins_pipe(ialu_reg_reg);
11027 %}
11028 
11029 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
11030                          iRegIorL2I src1, iRegIorL2I src2,
11031                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11032   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
11033   ins_cost(1.9 * INSN_COST);
11034   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
11035 
11036   ins_encode %{
11037     __ bicw(as_Register($dst$$reg),
11038               as_Register($src1$$reg),
11039               as_Register($src2$$reg),
11040               Assembler::LSR,
11041               $src3$$constant & 0x1f);
11042   %}
11043 
11044   ins_pipe(ialu_reg_reg_shift);
11045 %}
11046 
11047 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
11048                          iRegL src1, iRegL src2,
11049                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11050   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
11051   ins_cost(1.9 * INSN_COST);
11052   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
11053 
11054   ins_encode %{
11055     __ bic(as_Register($dst$$reg),
11056               as_Register($src1$$reg),
11057               as_Register($src2$$reg),
11058               Assembler::LSR,
11059               $src3$$constant & 0x3f);
11060   %}
11061 
11062   ins_pipe(ialu_reg_reg_shift);
11063 %}
11064 
11065 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
11066                          iRegIorL2I src1, iRegIorL2I src2,
11067                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11068   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
11069   ins_cost(1.9 * INSN_COST);
11070   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
11071 
11072   ins_encode %{
11073     __ bicw(as_Register($dst$$reg),
11074               as_Register($src1$$reg),
11075               as_Register($src2$$reg),
11076               Assembler::ASR,
11077               $src3$$constant & 0x1f);
11078   %}
11079 
11080   ins_pipe(ialu_reg_reg_shift);
11081 %}
11082 
11083 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
11084                          iRegL src1, iRegL src2,
11085                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11086   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
11087   ins_cost(1.9 * INSN_COST);
11088   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
11089 
11090   ins_encode %{
11091     __ bic(as_Register($dst$$reg),
11092               as_Register($src1$$reg),
11093               as_Register($src2$$reg),
11094               Assembler::ASR,
11095               $src3$$constant & 0x3f);
11096   %}
11097 
11098   ins_pipe(ialu_reg_reg_shift);
11099 %}
11100 
11101 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
11102                          iRegIorL2I src1, iRegIorL2I src2,
11103                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11104   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
11105   ins_cost(1.9 * INSN_COST);
11106   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
11107 
11108   ins_encode %{
11109     __ bicw(as_Register($dst$$reg),
11110               as_Register($src1$$reg),
11111               as_Register($src2$$reg),
11112               Assembler::LSL,
11113               $src3$$constant & 0x1f);
11114   %}
11115 
11116   ins_pipe(ialu_reg_reg_shift);
11117 %}
11118 
11119 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
11120                          iRegL src1, iRegL src2,
11121                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11122   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
11123   ins_cost(1.9 * INSN_COST);
11124   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
11125 
11126   ins_encode %{
11127     __ bic(as_Register($dst$$reg),
11128               as_Register($src1$$reg),
11129               as_Register($src2$$reg),
11130               Assembler::LSL,
11131               $src3$$constant & 0x3f);
11132   %}
11133 
11134   ins_pipe(ialu_reg_reg_shift);
11135 %}
11136 
11137 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
11138                          iRegIorL2I src1, iRegIorL2I src2,
11139                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11140   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
11141   ins_cost(1.9 * INSN_COST);
11142   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
11143 
11144   ins_encode %{
11145     __ eonw(as_Register($dst$$reg),
11146               as_Register($src1$$reg),
11147               as_Register($src2$$reg),
11148               Assembler::LSR,
11149               $src3$$constant & 0x1f);
11150   %}
11151 
11152   ins_pipe(ialu_reg_reg_shift);
11153 %}
11154 
11155 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
11156                          iRegL src1, iRegL src2,
11157                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11158   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
11159   ins_cost(1.9 * INSN_COST);
11160   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
11161 
11162   ins_encode %{
11163     __ eon(as_Register($dst$$reg),
11164               as_Register($src1$$reg),
11165               as_Register($src2$$reg),
11166               Assembler::LSR,
11167               $src3$$constant & 0x3f);
11168   %}
11169 
11170   ins_pipe(ialu_reg_reg_shift);
11171 %}
11172 
11173 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
11174                          iRegIorL2I src1, iRegIorL2I src2,
11175                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11176   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
11177   ins_cost(1.9 * INSN_COST);
11178   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
11179 
11180   ins_encode %{
11181     __ eonw(as_Register($dst$$reg),
11182               as_Register($src1$$reg),
11183               as_Register($src2$$reg),
11184               Assembler::ASR,
11185               $src3$$constant & 0x1f);
11186   %}
11187 
11188   ins_pipe(ialu_reg_reg_shift);
11189 %}
11190 
11191 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
11192                          iRegL src1, iRegL src2,
11193                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11194   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
11195   ins_cost(1.9 * INSN_COST);
11196   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
11197 
11198   ins_encode %{
11199     __ eon(as_Register($dst$$reg),
11200               as_Register($src1$$reg),
11201               as_Register($src2$$reg),
11202               Assembler::ASR,
11203               $src3$$constant & 0x3f);
11204   %}
11205 
11206   ins_pipe(ialu_reg_reg_shift);
11207 %}
11208 
11209 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
11210                          iRegIorL2I src1, iRegIorL2I src2,
11211                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11212   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
11213   ins_cost(1.9 * INSN_COST);
11214   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
11215 
11216   ins_encode %{
11217     __ eonw(as_Register($dst$$reg),
11218               as_Register($src1$$reg),
11219               as_Register($src2$$reg),
11220               Assembler::LSL,
11221               $src3$$constant & 0x1f);
11222   %}
11223 
11224   ins_pipe(ialu_reg_reg_shift);
11225 %}
11226 
11227 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
11228                          iRegL src1, iRegL src2,
11229                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11230   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
11231   ins_cost(1.9 * INSN_COST);
11232   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
11233 
11234   ins_encode %{
11235     __ eon(as_Register($dst$$reg),
11236               as_Register($src1$$reg),
11237               as_Register($src2$$reg),
11238               Assembler::LSL,
11239               $src3$$constant & 0x3f);
11240   %}
11241 
11242   ins_pipe(ialu_reg_reg_shift);
11243 %}
11244 
11245 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
11246                          iRegIorL2I src1, iRegIorL2I src2,
11247                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11248   match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
11249   ins_cost(1.9 * INSN_COST);
11250   format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
11251 
11252   ins_encode %{
11253     __ ornw(as_Register($dst$$reg),
11254               as_Register($src1$$reg),
11255               as_Register($src2$$reg),
11256               Assembler::LSR,
11257               $src3$$constant & 0x1f);
11258   %}
11259 
11260   ins_pipe(ialu_reg_reg_shift);
11261 %}
11262 
11263 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
11264                          iRegL src1, iRegL src2,
11265                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11266   match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
11267   ins_cost(1.9 * INSN_COST);
11268   format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
11269 
11270   ins_encode %{
11271     __ orn(as_Register($dst$$reg),
11272               as_Register($src1$$reg),
11273               as_Register($src2$$reg),
11274               Assembler::LSR,
11275               $src3$$constant & 0x3f);
11276   %}
11277 
11278   ins_pipe(ialu_reg_reg_shift);
11279 %}
11280 
11281 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
11282                          iRegIorL2I src1, iRegIorL2I src2,
11283                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11284   match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
11285   ins_cost(1.9 * INSN_COST);
11286   format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
11287 
11288   ins_encode %{
11289     __ ornw(as_Register($dst$$reg),
11290               as_Register($src1$$reg),
11291               as_Register($src2$$reg),
11292               Assembler::ASR,
11293               $src3$$constant & 0x1f);
11294   %}
11295 
11296   ins_pipe(ialu_reg_reg_shift);
11297 %}
11298 
11299 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
11300                          iRegL src1, iRegL src2,
11301                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11302   match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
11303   ins_cost(1.9 * INSN_COST);
11304   format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
11305 
11306   ins_encode %{
11307     __ orn(as_Register($dst$$reg),
11308               as_Register($src1$$reg),
11309               as_Register($src2$$reg),
11310               Assembler::ASR,
11311               $src3$$constant & 0x3f);
11312   %}
11313 
11314   ins_pipe(ialu_reg_reg_shift);
11315 %}
11316 
11317 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
11318                          iRegIorL2I src1, iRegIorL2I src2,
11319                          immI src3, immI_M1 src4, rFlagsReg cr) %{
11320   match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
11321   ins_cost(1.9 * INSN_COST);
11322   format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
11323 
11324   ins_encode %{
11325     __ ornw(as_Register($dst$$reg),
11326               as_Register($src1$$reg),
11327               as_Register($src2$$reg),
11328               Assembler::LSL,
11329               $src3$$constant & 0x1f);
11330   %}
11331 
11332   ins_pipe(ialu_reg_reg_shift);
11333 %}
11334 
11335 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
11336                          iRegL src1, iRegL src2,
11337                          immI src3, immL_M1 src4, rFlagsReg cr) %{
11338   match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
11339   ins_cost(1.9 * INSN_COST);
11340   format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
11341 
11342   ins_encode %{
11343     __ orn(as_Register($dst$$reg),
11344               as_Register($src1$$reg),
11345               as_Register($src2$$reg),
11346               Assembler::LSL,
11347               $src3$$constant & 0x3f);
11348   %}
11349 
11350   ins_pipe(ialu_reg_reg_shift);
11351 %}
11352 
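// The following patterns fold a constant shift of the second operand
// into the logical instruction itself, e.g. AndI src1 (URShiftI src2
// src3) becomes a single andw with an LSR-shifted register operand.
// The 1.9 * INSN_COST keeps this cheaper than the two separate
// instructions it replaces while still dearer than a plain reg-reg op.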
11353 instruct AndI_reg_URShift_reg(iRegINoSp dst,
11354                          iRegIorL2I src1, iRegIorL2I src2,
11355                          immI src3, rFlagsReg cr) %{
11356   match(Set dst (AndI src1 (URShiftI src2 src3)));
11357 
11358   ins_cost(1.9 * INSN_COST);
11359   format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
11360 
11361   ins_encode %{
11362     __ andw(as_Register($dst$$reg),
11363               as_Register($src1$$reg),
11364               as_Register($src2$$reg),
11365               Assembler::LSR,
11366               $src3$$constant & 0x1f);
11367   %}
11368 
11369   ins_pipe(ialu_reg_reg_shift);
11370 %}
11371 
11372 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
11373                          iRegL src1, iRegL src2,
11374                          immI src3, rFlagsReg cr) %{
11375   match(Set dst (AndL src1 (URShiftL src2 src3)));
11376 
11377   ins_cost(1.9 * INSN_COST);
11378   format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
11379 
11380   ins_encode %{
11381     __ andr(as_Register($dst$$reg),
11382               as_Register($src1$$reg),
11383               as_Register($src2$$reg),
11384               Assembler::LSR,
11385               $src3$$constant & 0x3f);
11386   %}
11387 
11388   ins_pipe(ialu_reg_reg_shift);
11389 %}
11390 
11391 instruct AndI_reg_RShift_reg(iRegINoSp dst,
11392                          iRegIorL2I src1, iRegIorL2I src2,
11393                          immI src3, rFlagsReg cr) %{
11394   match(Set dst (AndI src1 (RShiftI src2 src3)));
11395 
11396   ins_cost(1.9 * INSN_COST);
11397   format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
11398 
11399   ins_encode %{
11400     __ andw(as_Register($dst$$reg),
11401               as_Register($src1$$reg),
11402               as_Register($src2$$reg),
11403               Assembler::ASR,
11404               $src3$$constant & 0x1f);
11405   %}
11406 
11407   ins_pipe(ialu_reg_reg_shift);
11408 %}
11409 
11410 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
11411                          iRegL src1, iRegL src2,
11412                          immI src3, rFlagsReg cr) %{
11413   match(Set dst (AndL src1 (RShiftL src2 src3)));
11414 
11415   ins_cost(1.9 * INSN_COST);
11416   format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
11417 
11418   ins_encode %{
11419     __ andr(as_Register($dst$$reg),
11420               as_Register($src1$$reg),
11421               as_Register($src2$$reg),
11422               Assembler::ASR,
11423               $src3$$constant & 0x3f);
11424   %}
11425 
11426   ins_pipe(ialu_reg_reg_shift);
11427 %}
11428 
11429 instruct AndI_reg_LShift_reg(iRegINoSp dst,
11430                          iRegIorL2I src1, iRegIorL2I src2,
11431                          immI src3, rFlagsReg cr) %{
11432   match(Set dst (AndI src1 (LShiftI src2 src3)));
11433 
11434   ins_cost(1.9 * INSN_COST);
11435   format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
11436 
11437   ins_encode %{
11438     __ andw(as_Register($dst$$reg),
11439               as_Register($src1$$reg),
11440               as_Register($src2$$reg),
11441               Assembler::LSL,
11442               $src3$$constant & 0x1f);
11443   %}
11444 
11445   ins_pipe(ialu_reg_reg_shift);
11446 %}
11447 
11448 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
11449                          iRegL src1, iRegL src2,
11450                          immI src3, rFlagsReg cr) %{
11451   match(Set dst (AndL src1 (LShiftL src2 src3)));
11452 
11453   ins_cost(1.9 * INSN_COST);
11454   format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
11455 
11456   ins_encode %{
11457     __ andr(as_Register($dst$$reg),
11458               as_Register($src1$$reg),
11459               as_Register($src2$$reg),
11460               Assembler::LSL,
11461               $src3$$constant & 0x3f);
11462   %}
11463 
11464   ins_pipe(ialu_reg_reg_shift);
11465 %}
11466 
11467 instruct XorI_reg_URShift_reg(iRegINoSp dst,
11468                          iRegIorL2I src1, iRegIorL2I src2,
11469                          immI src3, rFlagsReg cr) %{
11470   match(Set dst (XorI src1 (URShiftI src2 src3)));
11471 
11472   ins_cost(1.9 * INSN_COST);
11473   format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
11474 
11475   ins_encode %{
11476     __ eorw(as_Register($dst$$reg),
11477               as_Register($src1$$reg),
11478               as_Register($src2$$reg),
11479               Assembler::LSR,
11480               $src3$$constant & 0x1f);
11481   %}
11482 
11483   ins_pipe(ialu_reg_reg_shift);
11484 %}
11485 
11486 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
11487                          iRegL src1, iRegL src2,
11488                          immI src3, rFlagsReg cr) %{
11489   match(Set dst (XorL src1 (URShiftL src2 src3)));
11490 
11491   ins_cost(1.9 * INSN_COST);
11492   format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
11493 
11494   ins_encode %{
11495     __ eor(as_Register($dst$$reg),
11496               as_Register($src1$$reg),
11497               as_Register($src2$$reg),
11498               Assembler::LSR,
11499               $src3$$constant & 0x3f);
11500   %}
11501 
11502   ins_pipe(ialu_reg_reg_shift);
11503 %}
11504 
11505 instruct XorI_reg_RShift_reg(iRegINoSp dst,
11506                          iRegIorL2I src1, iRegIorL2I src2,
11507                          immI src3, rFlagsReg cr) %{
11508   match(Set dst (XorI src1 (RShiftI src2 src3)));
11509 
11510   ins_cost(1.9 * INSN_COST);
11511   format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
11512 
11513   ins_encode %{
11514     __ eorw(as_Register($dst$$reg),
11515               as_Register($src1$$reg),
11516               as_Register($src2$$reg),
11517               Assembler::ASR,
11518               $src3$$constant & 0x1f);
11519   %}
11520 
11521   ins_pipe(ialu_reg_reg_shift);
11522 %}
11523 
11524 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
11525                          iRegL src1, iRegL src2,
11526                          immI src3, rFlagsReg cr) %{
11527   match(Set dst (XorL src1 (RShiftL src2 src3)));
11528 
11529   ins_cost(1.9 * INSN_COST);
11530   format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
11531 
11532   ins_encode %{
11533     __ eor(as_Register($dst$$reg),
11534               as_Register($src1$$reg),
11535               as_Register($src2$$reg),
11536               Assembler::ASR,
11537               $src3$$constant & 0x3f);
11538   %}
11539 
11540   ins_pipe(ialu_reg_reg_shift);
11541 %}
11542 
11543 instruct XorI_reg_LShift_reg(iRegINoSp dst,
11544                          iRegIorL2I src1, iRegIorL2I src2,
11545                          immI src3, rFlagsReg cr) %{
11546   match(Set dst (XorI src1 (LShiftI src2 src3)));
11547 
11548   ins_cost(1.9 * INSN_COST);
11549   format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
11550 
11551   ins_encode %{
11552     __ eorw(as_Register($dst$$reg),
11553               as_Register($src1$$reg),
11554               as_Register($src2$$reg),
11555               Assembler::LSL,
11556               $src3$$constant & 0x1f);
11557   %}
11558 
11559   ins_pipe(ialu_reg_reg_shift);
11560 %}
11561 
11562 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
11563                          iRegL src1, iRegL src2,
11564                          immI src3, rFlagsReg cr) %{
11565   match(Set dst (XorL src1 (LShiftL src2 src3)));
11566 
11567   ins_cost(1.9 * INSN_COST);
11568   format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
11569 
11570   ins_encode %{
11571     __ eor(as_Register($dst$$reg),
11572               as_Register($src1$$reg),
11573               as_Register($src2$$reg),
11574               Assembler::LSL,
11575               $src3$$constant & 0x3f);
11576   %}
11577 
11578   ins_pipe(ialu_reg_reg_shift);
11579 %}
11580 
11581 instruct OrI_reg_URShift_reg(iRegINoSp dst,
11582                          iRegIorL2I src1, iRegIorL2I src2,
11583                          immI src3, rFlagsReg cr) %{
11584   match(Set dst (OrI src1 (URShiftI src2 src3)));
11585 
11586   ins_cost(1.9 * INSN_COST);
11587   format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
11588 
11589   ins_encode %{
11590     __ orrw(as_Register($dst$$reg),
11591               as_Register($src1$$reg),
11592               as_Register($src2$$reg),
11593               Assembler::LSR,
11594               $src3$$constant & 0x1f);
11595   %}
11596 
11597   ins_pipe(ialu_reg_reg_shift);
11598 %}
11599 
11600 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
11601                          iRegL src1, iRegL src2,
11602                          immI src3, rFlagsReg cr) %{
11603   match(Set dst (OrL src1 (URShiftL src2 src3)));
11604 
11605   ins_cost(1.9 * INSN_COST);
11606   format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
11607 
11608   ins_encode %{
11609     __ orr(as_Register($dst$$reg),
11610               as_Register($src1$$reg),
11611               as_Register($src2$$reg),
11612               Assembler::LSR,
11613               $src3$$constant & 0x3f);
11614   %}
11615 
11616   ins_pipe(ialu_reg_reg_shift);
11617 %}
11618 
11619 instruct OrI_reg_RShift_reg(iRegINoSp dst,
11620                          iRegIorL2I src1, iRegIorL2I src2,
11621                          immI src3, rFlagsReg cr) %{
11622   match(Set dst (OrI src1 (RShiftI src2 src3)));
11623 
11624   ins_cost(1.9 * INSN_COST);
11625   format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
11626 
11627   ins_encode %{
11628     __ orrw(as_Register($dst$$reg),
11629               as_Register($src1$$reg),
11630               as_Register($src2$$reg),
11631               Assembler::ASR,
11632               $src3$$constant & 0x1f);
11633   %}
11634 
11635   ins_pipe(ialu_reg_reg_shift);
11636 %}
11637 
11638 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
11639                          iRegL src1, iRegL src2,
11640                          immI src3, rFlagsReg cr) %{
11641   match(Set dst (OrL src1 (RShiftL src2 src3)));
11642 
11643   ins_cost(1.9 * INSN_COST);
11644   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
11645 
11646   ins_encode %{
11647     __ orr(as_Register($dst$$reg),
11648               as_Register($src1$$reg),
11649               as_Register($src2$$reg),
11650               Assembler::ASR,
11651               $src3$$constant & 0x3f);
11652   %}
11653 
11654   ins_pipe(ialu_reg_reg_shift);
11655 %}
11656 
11657 instruct OrI_reg_LShift_reg(iRegINoSp dst,
11658                          iRegIorL2I src1, iRegIorL2I src2,
11659                          immI src3, rFlagsReg cr) %{
11660   match(Set dst (OrI src1 (LShiftI src2 src3)));
11661 
11662   ins_cost(1.9 * INSN_COST);
11663   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
11664 
11665   ins_encode %{
11666     __ orrw(as_Register($dst$$reg),
11667               as_Register($src1$$reg),
11668               as_Register($src2$$reg),
11669               Assembler::LSL,
11670               $src3$$constant & 0x1f);
11671   %}
11672 
11673   ins_pipe(ialu_reg_reg_shift);
11674 %}
11675 
11676 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
11677                          iRegL src1, iRegL src2,
11678                          immI src3, rFlagsReg cr) %{
11679   match(Set dst (OrL src1 (LShiftL src2 src3)));
11680 
11681   ins_cost(1.9 * INSN_COST);
11682   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
11683 
11684   ins_encode %{
11685     __ orr(as_Register($dst$$reg),
11686               as_Register($src1$$reg),
11687               as_Register($src2$$reg),
11688               Assembler::LSL,
11689               $src3$$constant & 0x3f);
11690   %}
11691 
11692   ins_pipe(ialu_reg_reg_shift);
11693 %}
11694 
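// Add and subtract accept the same shifted-register operand forms,
// folding a constant shift of the second operand into the single
// addw/add/subw/sub.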
11695 instruct AddI_reg_URShift_reg(iRegINoSp dst,
11696                          iRegIorL2I src1, iRegIorL2I src2,
11697                          immI src3, rFlagsReg cr) %{
11698   match(Set dst (AddI src1 (URShiftI src2 src3)));
11699 
11700   ins_cost(1.9 * INSN_COST);
11701   format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
11702 
11703   ins_encode %{
11704     __ addw(as_Register($dst$$reg),
11705               as_Register($src1$$reg),
11706               as_Register($src2$$reg),
11707               Assembler::LSR,
11708               $src3$$constant & 0x1f);
11709   %}
11710 
11711   ins_pipe(ialu_reg_reg_shift);
11712 %}
11713 
11714 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
11715                          iRegL src1, iRegL src2,
11716                          immI src3, rFlagsReg cr) %{
11717   match(Set dst (AddL src1 (URShiftL src2 src3)));
11718 
11719   ins_cost(1.9 * INSN_COST);
11720   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
11721 
11722   ins_encode %{
11723     __ add(as_Register($dst$$reg),
11724               as_Register($src1$$reg),
11725               as_Register($src2$$reg),
11726               Assembler::LSR,
11727               $src3$$constant & 0x3f);
11728   %}
11729 
11730   ins_pipe(ialu_reg_reg_shift);
11731 %}
11732 
11733 instruct AddI_reg_RShift_reg(iRegINoSp dst,
11734                          iRegIorL2I src1, iRegIorL2I src2,
11735                          immI src3, rFlagsReg cr) %{
11736   match(Set dst (AddI src1 (RShiftI src2 src3)));
11737 
11738   ins_cost(1.9 * INSN_COST);
11739   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
11740 
11741   ins_encode %{
11742     __ addw(as_Register($dst$$reg),
11743               as_Register($src1$$reg),
11744               as_Register($src2$$reg),
11745               Assembler::ASR,
11746               $src3$$constant & 0x1f);
11747   %}
11748 
11749   ins_pipe(ialu_reg_reg_shift);
11750 %}
11751 
11752 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
11753                          iRegL src1, iRegL src2,
11754                          immI src3, rFlagsReg cr) %{
11755   match(Set dst (AddL src1 (RShiftL src2 src3)));
11756 
11757   ins_cost(1.9 * INSN_COST);
11758   format %{ "add  $dst, $src1, $src2, ASR $src3" %}
11759 
11760   ins_encode %{
11761     __ add(as_Register($dst$$reg),
11762               as_Register($src1$$reg),
11763               as_Register($src2$$reg),
11764               Assembler::ASR,
11765               $src3$$constant & 0x3f);
11766   %}
11767 
11768   ins_pipe(ialu_reg_reg_shift);
11769 %}
11770 
11771 instruct AddI_reg_LShift_reg(iRegINoSp dst,
11772                          iRegIorL2I src1, iRegIorL2I src2,
11773                          immI src3, rFlagsReg cr) %{
11774   match(Set dst (AddI src1 (LShiftI src2 src3)));
11775 
11776   ins_cost(1.9 * INSN_COST);
11777   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
11778 
11779   ins_encode %{
11780     __ addw(as_Register($dst$$reg),
11781               as_Register($src1$$reg),
11782               as_Register($src2$$reg),
11783               Assembler::LSL,
11784               $src3$$constant & 0x1f);
11785   %}
11786 
11787   ins_pipe(ialu_reg_reg_shift);
11788 %}
11789 
11790 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
11791                          iRegL src1, iRegL src2,
11792                          immI src3, rFlagsReg cr) %{
11793   match(Set dst (AddL src1 (LShiftL src2 src3)));
11794 
11795   ins_cost(1.9 * INSN_COST);
11796   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
11797 
11798   ins_encode %{
11799     __ add(as_Register($dst$$reg),
11800               as_Register($src1$$reg),
11801               as_Register($src2$$reg),
11802               Assembler::LSL,
11803               $src3$$constant & 0x3f);
11804   %}
11805 
11806   ins_pipe(ialu_reg_reg_shift);
11807 %}
11808 
11809 instruct SubI_reg_URShift_reg(iRegINoSp dst,
11810                          iRegIorL2I src1, iRegIorL2I src2,
11811                          immI src3, rFlagsReg cr) %{
11812   match(Set dst (SubI src1 (URShiftI src2 src3)));
11813 
11814   ins_cost(1.9 * INSN_COST);
11815   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
11816 
11817   ins_encode %{
11818     __ subw(as_Register($dst$$reg),
11819               as_Register($src1$$reg),
11820               as_Register($src2$$reg),
11821               Assembler::LSR,
11822               $src3$$constant & 0x1f);
11823   %}
11824 
11825   ins_pipe(ialu_reg_reg_shift);
11826 %}
11827 
11828 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
11829                          iRegL src1, iRegL src2,
11830                          immI src3, rFlagsReg cr) %{
11831   match(Set dst (SubL src1 (URShiftL src2 src3)));
11832 
11833   ins_cost(1.9 * INSN_COST);
11834   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
11835 
11836   ins_encode %{
11837     __ sub(as_Register($dst$$reg),
11838               as_Register($src1$$reg),
11839               as_Register($src2$$reg),
11840               Assembler::LSR,
11841               $src3$$constant & 0x3f);
11842   %}
11843 
11844   ins_pipe(ialu_reg_reg_shift);
11845 %}
11846 
11847 instruct SubI_reg_RShift_reg(iRegINoSp dst,
11848                          iRegIorL2I src1, iRegIorL2I src2,
11849                          immI src3, rFlagsReg cr) %{
11850   match(Set dst (SubI src1 (RShiftI src2 src3)));
11851 
11852   ins_cost(1.9 * INSN_COST);
11853   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
11854 
11855   ins_encode %{
11856     __ subw(as_Register($dst$$reg),
11857               as_Register($src1$$reg),
11858               as_Register($src2$$reg),
11859               Assembler::ASR,
11860               $src3$$constant & 0x1f);
11861   %}
11862 
11863   ins_pipe(ialu_reg_reg_shift);
11864 %}
11865 
11866 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
11867                          iRegL src1, iRegL src2,
11868                          immI src3, rFlagsReg cr) %{
11869   match(Set dst (SubL src1 (RShiftL src2 src3)));
11870 
11871   ins_cost(1.9 * INSN_COST);
11872   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
11873 
11874   ins_encode %{
11875     __ sub(as_Register($dst$$reg),
11876               as_Register($src1$$reg),
11877               as_Register($src2$$reg),
11878               Assembler::ASR,
11879               $src3$$constant & 0x3f);
11880   %}
11881 
11882   ins_pipe(ialu_reg_reg_shift);
11883 %}
11884 
11885 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11886                          iRegIorL2I src1, iRegIorL2I src2,
11887                          immI src3, rFlagsReg cr) %{
11888   match(Set dst (SubI src1 (LShiftI src2 src3)));
11889 
11890   ins_cost(1.9 * INSN_COST);
11891   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11892 
11893   ins_encode %{
11894     __ subw(as_Register($dst$$reg),
11895               as_Register($src1$$reg),
11896               as_Register($src2$$reg),
11897               Assembler::LSL,
11898               $src3$$constant & 0x1f);
11899   %}
11900 
11901   ins_pipe(ialu_reg_reg_shift);
11902 %}
11903 
11904 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11905                          iRegL src1, iRegL src2,
11906                          immI src3, rFlagsReg cr) %{
11907   match(Set dst (SubL src1 (LShiftL src2 src3)));
11908 
11909   ins_cost(1.9 * INSN_COST);
11910   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11911 
11912   ins_encode %{
11913     __ sub(as_Register($dst$$reg),
11914               as_Register($src1$$reg),
11915               as_Register($src2$$reg),
11916               Assembler::LSL,
11917               $src3$$constant & 0x3f);
11918   %}
11919 
11920   ins_pipe(ialu_reg_reg_shift);
11921 %}
11922 
11923 
11924 
11925 // Shift Left followed by Shift Right.
11926 // This idiom is used by the compiler for the i2b bytecode etc.
11927 instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11928 %{
11929   match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
11930   // Make sure we are not going to exceed what sbfm can do.
11931   predicate((unsigned int)n->in(2)->get_int() <= 63
11932             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11933 
11934   ins_cost(INSN_COST * 2);
11935   format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11936   ins_encode %{
11937     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11938     int s = 63 - lshift;
11939     int r = (rshift - lshift) & 63;
11940     __ sbfm(as_Register($dst$$reg),
11941             as_Register($src$$reg),
11942             r, s);
11943   %}
11944 
11945   ins_pipe(ialu_reg_shift);
11946 %}
11947 
11948 // Shift Left followed by Shift Right.
11949 // This idiom is used by the compiler for the i2b bytecode etc.
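// For example, i2b is (x << 24) >> 24, giving lshift = rshift = 24;
// the computed sbfmw with r = 0 and s = 7 is exactly sxtb.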
11950 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11951 %{
11952   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
11953   // Make sure we are not going to exceed what sbfmw can do.
11954   predicate((unsigned int)n->in(2)->get_int() <= 31
11955             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11956 
11957   ins_cost(INSN_COST * 2);
11958   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11959   ins_encode %{
11960     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11961     int s = 31 - lshift;
11962     int r = (rshift - lshift) & 31;
11963     __ sbfmw(as_Register($dst$$reg),
11964             as_Register($src$$reg),
11965             r, s);
11966   %}
11967 
11968   ins_pipe(ialu_reg_shift);
11969 %}
11970 
11971 // Shift Left followed by Shift Right.
11972 // This idiom is used by the compiler for the i2b bytecode etc.
11973 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11974 %{
11975   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
11976   // Make sure we are not going to exceed what ubfm can do.
11977   predicate((unsigned int)n->in(2)->get_int() <= 63
11978             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11979 
11980   ins_cost(INSN_COST * 2);
11981   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11982   ins_encode %{
11983     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11984     int s = 63 - lshift;
11985     int r = (rshift - lshift) & 63;
11986     __ ubfm(as_Register($dst$$reg),
11987             as_Register($src$$reg),
11988             r, s);
11989   %}
11990 
11991   ins_pipe(ialu_reg_shift);
11992 %}
11993 
11994 // Shift Left followed by Shift Right.
11995 // This idiom is used by the compiler for the i2b bytecode etc.
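// For example (illustrative): the char-style idiom (i << 16) >>> 16
// matches here with lshift_count == rshift_count == 16, so r == 0 and
// s == 15, i.e.
//   ubfmw  dst, src, #0, #15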
11996 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11997 %{
11998   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11999   // Make sure we are not going to exceed what ubfmw can do.
12000   predicate((unsigned int)n->in(2)->get_int() <= 31
12001             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
12002 
12003   ins_cost(INSN_COST * 2);
12004   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
12005   ins_encode %{
12006     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
12007     int s = 31 - lshift;
12008     int r = (rshift - lshift) & 31;
12009     __ ubfmw(as_Register($dst$$reg),
12010             as_Register($src$$reg),
12011             r, s);
12012   %}
12013 
12014   ins_pipe(ialu_reg_shift);
12015 %}
12016 // Bitfield extract with shift & mask
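// For example (illustrative): (i >>> 3) & 0xFF matches ubfxwI below
// with rshift == 3 and mask == 0xFF, so width == exact_log2(0x100) == 8:
//   ubfxw  dst, src, #3, #8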
12017 
12018 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
12019 %{
12020   match(Set dst (AndI (URShiftI src rshift) mask));
12021 
12022   ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
12024   ins_encode %{
12025     int rshift = $rshift$$constant;
12026     long mask = $mask$$constant;
12027     int width = exact_log2(mask+1);
12028     __ ubfxw(as_Register($dst$$reg),
12029             as_Register($src$$reg), rshift, width);
12030   %}
12031   ins_pipe(ialu_reg_shift);
12032 %}
12033 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
12034 %{
12035   match(Set dst (AndL (URShiftL src rshift) mask));
12036 
12037   ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
12039   ins_encode %{
12040     int rshift = $rshift$$constant;
12041     long mask = $mask$$constant;
12042     int width = exact_log2(mask+1);
12043     __ ubfx(as_Register($dst$$reg),
12044             as_Register($src$$reg), rshift, width);
12045   %}
12046   ins_pipe(ialu_reg_shift);
12047 %}
12048 
// We can use ubfx when extending an And with a mask, as long as we know
// the mask is positive.  We know that because immI_bitmask guarantees it.
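// For example (illustrative): (long)((i >>> 2) & 0x3F) matches here
// with width == exact_log2(0x40) == 6; ubfx on the 64-bit register
// zeroes bits 6..63, which is exactly the i2l extension of the
// non-negative int result:
//   ubfx  dst, src, #2, #6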
12051 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
12052 %{
12053   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
12054 
12055   ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
12057   ins_encode %{
12058     int rshift = $rshift$$constant;
12059     long mask = $mask$$constant;
12060     int width = exact_log2(mask+1);
12061     __ ubfx(as_Register($dst$$reg),
12062             as_Register($src$$reg), rshift, width);
12063   %}
12064   ins_pipe(ialu_reg_shift);
12065 %}
12066 
12067 // Rotations
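// A rotate by a constant is an extract whose two source registers
// happen to be equal.  For example (illustrative), a constant
// Long.rotateRight(x, 5) reaches the matcher as (x << 59) | (x >>> 5);
// the predicate checks that the two shift counts sum to 0 mod the
// register width, and the whole expression becomes
//   extr  dst, x, x, #5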
12068 
12069 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
12070 %{
12071   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
12072   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
12073 
12074   ins_cost(INSN_COST);
12075   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12076 
12077   ins_encode %{
12078     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12079             $rshift$$constant & 63);
12080   %}
12081   ins_pipe(ialu_reg_reg_extr);
12082 %}
12083 
12084 instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
12085 %{
12086   match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
12087   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
12088 
12089   ins_cost(INSN_COST);
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}
12091 
12092   ins_encode %{
12093     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12094             $rshift$$constant & 31);
12095   %}
12096   ins_pipe(ialu_reg_reg_extr);
12097 %}
12098 
12099 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
12100 %{
12101   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
12102   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
12103 
12104   ins_cost(INSN_COST);
12105   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12106 
12107   ins_encode %{
12108     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12109             $rshift$$constant & 63);
12110   %}
12111   ins_pipe(ialu_reg_reg_extr);
12112 %}
12113 
12114 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
12115 %{
12116   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
12117   predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
12118 
12119   ins_cost(INSN_COST);
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}
12121 
12122   ins_encode %{
12123     __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12124             $rshift$$constant & 31);
12125   %}
12126   ins_pipe(ialu_reg_reg_extr);
12127 %}
12128 
12129 
12130 // rol expander
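// There is no rol instruction, but rol(x, s) == ror(x, -s), and
// rorv/rorvw only consume the low bits of the shift register, so the
// expansion (illustrative) is just
//   subw  rscratch1, zr, shift     // rscratch1 = -shift
//   rorv  dst, src, rscratch1      // rotate right by -shift mod 64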
12131 
12132 instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
12133 %{
12134   effect(DEF dst, USE src, USE shift);
12135 
12136   format %{ "rol    $dst, $src, $shift" %}
12137   ins_cost(INSN_COST * 3);
12138   ins_encode %{
12139     __ subw(rscratch1, zr, as_Register($shift$$reg));
12140     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
12141             rscratch1);
  %}
12143   ins_pipe(ialu_reg_reg_vshift);
12144 %}
12145 
12146 // rol expander
12147 
12148 instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
12149 %{
12150   effect(DEF dst, USE src, USE shift);
12151 
12152   format %{ "rol    $dst, $src, $shift" %}
12153   ins_cost(INSN_COST * 3);
12154   ins_encode %{
12155     __ subw(rscratch1, zr, as_Register($shift$$reg));
12156     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
12157             rscratch1);
  %}
12159   ins_pipe(ialu_reg_reg_vshift);
12160 %}
12161 
12162 instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
12163 %{
12164   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
12165 
12166   expand %{
12167     rolL_rReg(dst, src, shift, cr);
12168   %}
12169 %}
12170 
12171 instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
12172 %{
12173   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
12174 
12175   expand %{
12176     rolL_rReg(dst, src, shift, cr);
12177   %}
12178 %}
12179 
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12197 
12198 // ror expander
12199 
12200 instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
12201 %{
12202   effect(DEF dst, USE src, USE shift);
12203 
12204   format %{ "ror    $dst, $src, $shift" %}
12205   ins_cost(INSN_COST);
12206   ins_encode %{
12207     __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
12208             as_Register($shift$$reg));
  %}
12210   ins_pipe(ialu_reg_reg_vshift);
12211 %}
12212 
12213 // ror expander
12214 
12215 instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
12216 %{
12217   effect(DEF dst, USE src, USE shift);
12218 
12219   format %{ "ror    $dst, $src, $shift" %}
12220   ins_cost(INSN_COST);
12221   ins_encode %{
12222     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
12223             as_Register($shift$$reg));
  %}
12225   ins_pipe(ialu_reg_reg_vshift);
12226 %}
12227 
12228 instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
12229 %{
12230   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));
12231 
12232   expand %{
12233     rorL_rReg(dst, src, shift, cr);
12234   %}
12235 %}
12236 
12237 instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
12238 %{
12239   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));
12240 
12241   expand %{
12242     rorL_rReg(dst, src, shift, cr);
12243   %}
12244 %}
12245 
12246 instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
12247 %{
12248   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));
12249 
12250   expand %{
12251     rorI_rReg(dst, src, shift, cr);
12252   %}
12253 %}
12254 
12255 instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
12256 %{
12257   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));
12258 
12259   expand %{
12260     rorI_rReg(dst, src, shift, cr);
12261   %}
12262 %}
12263 
12264 // Add/subtract (extended)
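// These use the extended-register forms of add and sub, which widen
// the second operand on the fly.  For example (illustrative), mixing a
// long and an int as in l + i gives AddL src1 (ConvI2L src2), which
// AddExtI below turns into a single
//   add  dst, src1, src2, sxtw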
12265 
12266 instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
12267 %{
12268   match(Set dst (AddL src1 (ConvI2L src2)));
12269   ins_cost(INSN_COST);
12270   format %{ "add  $dst, $src1, sxtw $src2" %}
12271 
12272    ins_encode %{
12273      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12274             as_Register($src2$$reg), ext::sxtw);
12275    %}
12276   ins_pipe(ialu_reg_reg);
%}
12278 
12279 instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
12280 %{
12281   match(Set dst (SubL src1 (ConvI2L src2)));
12282   ins_cost(INSN_COST);
12283   format %{ "sub  $dst, $src1, sxtw $src2" %}
12284 
12285    ins_encode %{
12286      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12287             as_Register($src2$$reg), ext::sxtw);
12288    %}
12289   ins_pipe(ialu_reg_reg);
%}
12291 
12292 
12293 instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
12294 %{
12295   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
12296   ins_cost(INSN_COST);
12297   format %{ "add  $dst, $src1, sxth $src2" %}
12298 
12299    ins_encode %{
12300      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12301             as_Register($src2$$reg), ext::sxth);
12302    %}
12303   ins_pipe(ialu_reg_reg);
12304 %}
12305 
12306 instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
12307 %{
12308   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
12309   ins_cost(INSN_COST);
12310   format %{ "add  $dst, $src1, sxtb $src2" %}
12311 
12312    ins_encode %{
12313      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12314             as_Register($src2$$reg), ext::sxtb);
12315    %}
12316   ins_pipe(ialu_reg_reg);
12317 %}
12318 
12319 instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
12320 %{
12321   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
12322   ins_cost(INSN_COST);
12323   format %{ "add  $dst, $src1, uxtb $src2" %}
12324 
12325    ins_encode %{
12326      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12327             as_Register($src2$$reg), ext::uxtb);
12328    %}
12329   ins_pipe(ialu_reg_reg);
12330 %}
12331 
12332 instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
12333 %{
12334   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
12335   ins_cost(INSN_COST);
12336   format %{ "add  $dst, $src1, sxth $src2" %}
12337 
12338    ins_encode %{
12339      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12340             as_Register($src2$$reg), ext::sxth);
12341    %}
12342   ins_pipe(ialu_reg_reg);
12343 %}
12344 
12345 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
12346 %{
12347   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
12348   ins_cost(INSN_COST);
12349   format %{ "add  $dst, $src1, sxtw $src2" %}
12350 
12351    ins_encode %{
12352      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12353             as_Register($src2$$reg), ext::sxtw);
12354    %}
12355   ins_pipe(ialu_reg_reg);
12356 %}
12357 
12358 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
12359 %{
12360   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
12361   ins_cost(INSN_COST);
12362   format %{ "add  $dst, $src1, sxtb $src2" %}
12363 
12364    ins_encode %{
12365      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12366             as_Register($src2$$reg), ext::sxtb);
12367    %}
12368   ins_pipe(ialu_reg_reg);
12369 %}
12370 
12371 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
12372 %{
12373   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
12374   ins_cost(INSN_COST);
12375   format %{ "add  $dst, $src1, uxtb $src2" %}
12376 
12377    ins_encode %{
12378      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12379             as_Register($src2$$reg), ext::uxtb);
12380    %}
12381   ins_pipe(ialu_reg_reg);
12382 %}
12383 
12384 
12385 instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
12386 %{
12387   match(Set dst (AddI src1 (AndI src2 mask)));
12388   ins_cost(INSN_COST);
12389   format %{ "addw  $dst, $src1, $src2, uxtb" %}
12390 
12391    ins_encode %{
12392      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12393             as_Register($src2$$reg), ext::uxtb);
12394    %}
12395   ins_pipe(ialu_reg_reg);
12396 %}
12397 
12398 instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
12399 %{
12400   match(Set dst (AddI src1 (AndI src2 mask)));
12401   ins_cost(INSN_COST);
12402   format %{ "addw  $dst, $src1, $src2, uxth" %}
12403 
12404    ins_encode %{
12405      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
12406             as_Register($src2$$reg), ext::uxth);
12407    %}
12408   ins_pipe(ialu_reg_reg);
12409 %}
12410 
12411 instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
12412 %{
12413   match(Set dst (AddL src1 (AndL src2 mask)));
12414   ins_cost(INSN_COST);
12415   format %{ "add  $dst, $src1, $src2, uxtb" %}
12416 
12417    ins_encode %{
12418      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12419             as_Register($src2$$reg), ext::uxtb);
12420    %}
12421   ins_pipe(ialu_reg_reg);
12422 %}
12423 
12424 instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
12425 %{
12426   match(Set dst (AddL src1 (AndL src2 mask)));
12427   ins_cost(INSN_COST);
12428   format %{ "add  $dst, $src1, $src2, uxth" %}
12429 
12430    ins_encode %{
12431      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12432             as_Register($src2$$reg), ext::uxth);
12433    %}
12434   ins_pipe(ialu_reg_reg);
12435 %}
12436 
12437 instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12438 %{
12439   match(Set dst (AddL src1 (AndL src2 mask)));
12440   ins_cost(INSN_COST);
12441   format %{ "add  $dst, $src1, $src2, uxtw" %}
12442 
12443    ins_encode %{
12444      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
12445             as_Register($src2$$reg), ext::uxtw);
12446    %}
12447   ins_pipe(ialu_reg_reg);
12448 %}
12449 
12450 instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
12451 %{
12452   match(Set dst (SubI src1 (AndI src2 mask)));
12453   ins_cost(INSN_COST);
12454   format %{ "subw  $dst, $src1, $src2, uxtb" %}
12455 
12456    ins_encode %{
12457      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12458             as_Register($src2$$reg), ext::uxtb);
12459    %}
12460   ins_pipe(ialu_reg_reg);
12461 %}
12462 
12463 instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
12464 %{
12465   match(Set dst (SubI src1 (AndI src2 mask)));
12466   ins_cost(INSN_COST);
12467   format %{ "subw  $dst, $src1, $src2, uxth" %}
12468 
12469    ins_encode %{
12470      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
12471             as_Register($src2$$reg), ext::uxth);
12472    %}
12473   ins_pipe(ialu_reg_reg);
12474 %}
12475 
12476 instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
12477 %{
12478   match(Set dst (SubL src1 (AndL src2 mask)));
12479   ins_cost(INSN_COST);
12480   format %{ "sub  $dst, $src1, $src2, uxtb" %}
12481 
12482    ins_encode %{
12483      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12484             as_Register($src2$$reg), ext::uxtb);
12485    %}
12486   ins_pipe(ialu_reg_reg);
12487 %}
12488 
12489 instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
12490 %{
12491   match(Set dst (SubL src1 (AndL src2 mask)));
12492   ins_cost(INSN_COST);
12493   format %{ "sub  $dst, $src1, $src2, uxth" %}
12494 
12495    ins_encode %{
12496      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12497             as_Register($src2$$reg), ext::uxth);
12498    %}
12499   ins_pipe(ialu_reg_reg);
12500 %}
12501 
12502 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
12503 %{
12504   match(Set dst (SubL src1 (AndL src2 mask)));
12505   ins_cost(INSN_COST);
12506   format %{ "sub  $dst, $src1, $src2, uxtw" %}
12507 
12508    ins_encode %{
12509      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
12510             as_Register($src2$$reg), ext::uxtw);
12511    %}
12512   ins_pipe(ialu_reg_reg);
12513 %}
12514 
12515 // END This section of the file is automatically generated. Do not edit --------------
12516 
12517 // ============================================================================
12518 // Floating Point Arithmetic Instructions
12519 
12520 instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12521   match(Set dst (AddF src1 src2));
12522 
12523   ins_cost(INSN_COST * 5);
12524   format %{ "fadds   $dst, $src1, $src2" %}
12525 
12526   ins_encode %{
12527     __ fadds(as_FloatRegister($dst$$reg),
12528              as_FloatRegister($src1$$reg),
12529              as_FloatRegister($src2$$reg));
12530   %}
12531 
12532   ins_pipe(fp_dop_reg_reg_s);
12533 %}
12534 
12535 instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12536   match(Set dst (AddD src1 src2));
12537 
12538   ins_cost(INSN_COST * 5);
12539   format %{ "faddd   $dst, $src1, $src2" %}
12540 
12541   ins_encode %{
12542     __ faddd(as_FloatRegister($dst$$reg),
12543              as_FloatRegister($src1$$reg),
12544              as_FloatRegister($src2$$reg));
12545   %}
12546 
12547   ins_pipe(fp_dop_reg_reg_d);
12548 %}
12549 
12550 instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12551   match(Set dst (SubF src1 src2));
12552 
12553   ins_cost(INSN_COST * 5);
12554   format %{ "fsubs   $dst, $src1, $src2" %}
12555 
12556   ins_encode %{
12557     __ fsubs(as_FloatRegister($dst$$reg),
12558              as_FloatRegister($src1$$reg),
12559              as_FloatRegister($src2$$reg));
12560   %}
12561 
12562   ins_pipe(fp_dop_reg_reg_s);
12563 %}
12564 
12565 instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12566   match(Set dst (SubD src1 src2));
12567 
12568   ins_cost(INSN_COST * 5);
12569   format %{ "fsubd   $dst, $src1, $src2" %}
12570 
12571   ins_encode %{
12572     __ fsubd(as_FloatRegister($dst$$reg),
12573              as_FloatRegister($src1$$reg),
12574              as_FloatRegister($src2$$reg));
12575   %}
12576 
12577   ins_pipe(fp_dop_reg_reg_d);
12578 %}
12579 
12580 instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12581   match(Set dst (MulF src1 src2));
12582 
12583   ins_cost(INSN_COST * 6);
12584   format %{ "fmuls   $dst, $src1, $src2" %}
12585 
12586   ins_encode %{
12587     __ fmuls(as_FloatRegister($dst$$reg),
12588              as_FloatRegister($src1$$reg),
12589              as_FloatRegister($src2$$reg));
12590   %}
12591 
12592   ins_pipe(fp_dop_reg_reg_s);
12593 %}
12594 
12595 instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12596   match(Set dst (MulD src1 src2));
12597 
12598   ins_cost(INSN_COST * 6);
12599   format %{ "fmuld   $dst, $src1, $src2" %}
12600 
12601   ins_encode %{
12602     __ fmuld(as_FloatRegister($dst$$reg),
12603              as_FloatRegister($src1$$reg),
12604              as_FloatRegister($src2$$reg));
12605   %}
12606 
12607   ins_pipe(fp_dop_reg_reg_d);
12608 %}
12609 
// We cannot use these fused multiply-add/subtract ops because they
// don't produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result).  That's a
// shame.  Leaving them here in case we can identify cases where it is
// legitimate to use them.
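// For example (illustrative): fmadds computes round(a * b + c) with a
// single rounding, whereas the Java expression a * b + c requires
// round(round(a * b) + c), so the two can differ in the final bit of
// the significand when the intermediate product is inexact.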
12615 
12616 
12617 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12618 //   match(Set dst (AddF (MulF src1 src2) src3));
12619 
12620 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12621 
12622 //   ins_encode %{
12623 //     __ fmadds(as_FloatRegister($dst$$reg),
12624 //              as_FloatRegister($src1$$reg),
12625 //              as_FloatRegister($src2$$reg),
12626 //              as_FloatRegister($src3$$reg));
12627 //   %}
12628 
12629 //   ins_pipe(pipe_class_default);
12630 // %}
12631 
12632 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12633 //   match(Set dst (AddD (MulD src1 src2) src3));
12634 
12635 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12636 
12637 //   ins_encode %{
12638 //     __ fmaddd(as_FloatRegister($dst$$reg),
12639 //              as_FloatRegister($src1$$reg),
12640 //              as_FloatRegister($src2$$reg),
12641 //              as_FloatRegister($src3$$reg));
12642 //   %}
12643 
12644 //   ins_pipe(pipe_class_default);
12645 // %}
12646 
12647 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12648 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12649 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12650 
12651 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12652 
12653 //   ins_encode %{
12654 //     __ fmsubs(as_FloatRegister($dst$$reg),
12655 //               as_FloatRegister($src1$$reg),
12656 //               as_FloatRegister($src2$$reg),
12657 //              as_FloatRegister($src3$$reg));
12658 //   %}
12659 
12660 //   ins_pipe(pipe_class_default);
12661 // %}
12662 
12663 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12664 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12665 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12666 
12667 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12668 
12669 //   ins_encode %{
12670 //     __ fmsubd(as_FloatRegister($dst$$reg),
12671 //               as_FloatRegister($src1$$reg),
12672 //               as_FloatRegister($src2$$reg),
12673 //               as_FloatRegister($src3$$reg));
12674 //   %}
12675 
12676 //   ins_pipe(pipe_class_default);
12677 // %}
12678 
12679 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12680 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12681 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12682 
12683 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12684 
12685 //   ins_encode %{
12686 //     __ fnmadds(as_FloatRegister($dst$$reg),
12687 //                as_FloatRegister($src1$$reg),
12688 //                as_FloatRegister($src2$$reg),
12689 //                as_FloatRegister($src3$$reg));
12690 //   %}
12691 
12692 //   ins_pipe(pipe_class_default);
12693 // %}
12694 
12695 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12696 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12697 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12698 
12699 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12700 
12701 //   ins_encode %{
12702 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12703 //                as_FloatRegister($src1$$reg),
12704 //                as_FloatRegister($src2$$reg),
12705 //                as_FloatRegister($src3$$reg));
12706 //   %}
12707 
12708 //   ins_pipe(pipe_class_default);
12709 // %}
12710 
12711 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12712 //   match(Set dst (SubF (MulF src1 src2) src3));
12713 
12714 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12715 
12716 //   ins_encode %{
12717 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12718 //                as_FloatRegister($src1$$reg),
12719 //                as_FloatRegister($src2$$reg),
12720 //                as_FloatRegister($src3$$reg));
12721 //   %}
12722 
12723 //   ins_pipe(pipe_class_default);
12724 // %}
12725 
12726 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12727 //   match(Set dst (SubD (MulD src1 src2) src3));
12728 
12729 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12730 
12731 //   ins_encode %{
12732 //   // n.b. insn name should be fnmsubd
12733 //     __ fnmsub(as_FloatRegister($dst$$reg),
12734 //                as_FloatRegister($src1$$reg),
12735 //                as_FloatRegister($src2$$reg),
12736 //                as_FloatRegister($src3$$reg));
12737 //   %}
12738 
12739 //   ins_pipe(pipe_class_default);
12740 // %}
12741 
12742 
12743 instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12744   match(Set dst (DivF src1  src2));
12745 
12746   ins_cost(INSN_COST * 18);
12747   format %{ "fdivs   $dst, $src1, $src2" %}
12748 
12749   ins_encode %{
12750     __ fdivs(as_FloatRegister($dst$$reg),
12751              as_FloatRegister($src1$$reg),
12752              as_FloatRegister($src2$$reg));
12753   %}
12754 
12755   ins_pipe(fp_div_s);
12756 %}
12757 
12758 instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12759   match(Set dst (DivD src1  src2));
12760 
12761   ins_cost(INSN_COST * 32);
12762   format %{ "fdivd   $dst, $src1, $src2" %}
12763 
12764   ins_encode %{
12765     __ fdivd(as_FloatRegister($dst$$reg),
12766              as_FloatRegister($src1$$reg),
12767              as_FloatRegister($src2$$reg));
12768   %}
12769 
12770   ins_pipe(fp_div_d);
12771 %}
12772 
12773 instruct negF_reg_reg(vRegF dst, vRegF src) %{
12774   match(Set dst (NegF src));
12775 
12776   ins_cost(INSN_COST * 3);
12777   format %{ "fneg   $dst, $src" %}
12778 
12779   ins_encode %{
12780     __ fnegs(as_FloatRegister($dst$$reg),
12781              as_FloatRegister($src$$reg));
12782   %}
12783 
12784   ins_pipe(fp_uop_s);
12785 %}
12786 
12787 instruct negD_reg_reg(vRegD dst, vRegD src) %{
12788   match(Set dst (NegD src));
12789 
12790   ins_cost(INSN_COST * 3);
12791   format %{ "fnegd   $dst, $src" %}
12792 
12793   ins_encode %{
12794     __ fnegd(as_FloatRegister($dst$$reg),
12795              as_FloatRegister($src$$reg));
12796   %}
12797 
12798   ins_pipe(fp_uop_d);
12799 %}
12800 
12801 instruct absF_reg(vRegF dst, vRegF src) %{
12802   match(Set dst (AbsF src));
12803 
12804   ins_cost(INSN_COST * 3);
12805   format %{ "fabss   $dst, $src" %}
12806   ins_encode %{
12807     __ fabss(as_FloatRegister($dst$$reg),
12808              as_FloatRegister($src$$reg));
12809   %}
12810 
12811   ins_pipe(fp_uop_s);
12812 %}
12813 
12814 instruct absD_reg(vRegD dst, vRegD src) %{
12815   match(Set dst (AbsD src));
12816 
12817   ins_cost(INSN_COST * 3);
12818   format %{ "fabsd   $dst, $src" %}
12819   ins_encode %{
12820     __ fabsd(as_FloatRegister($dst$$reg),
12821              as_FloatRegister($src$$reg));
12822   %}
12823 
12824   ins_pipe(fp_uop_d);
12825 %}
12826 
12827 instruct sqrtD_reg(vRegD dst, vRegD src) %{
12828   match(Set dst (SqrtD src));
12829 
12830   ins_cost(INSN_COST * 50);
12831   format %{ "fsqrtd  $dst, $src" %}
12832   ins_encode %{
12833     __ fsqrtd(as_FloatRegister($dst$$reg),
12834              as_FloatRegister($src$$reg));
12835   %}
12836 
  ins_pipe(fp_div_d);
12838 %}
12839 
12840 instruct sqrtF_reg(vRegF dst, vRegF src) %{
12841   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
12842 
12843   ins_cost(INSN_COST * 50);
12844   format %{ "fsqrts  $dst, $src" %}
12845   ins_encode %{
12846     __ fsqrts(as_FloatRegister($dst$$reg),
12847              as_FloatRegister($src$$reg));
12848   %}
12849 
  ins_pipe(fp_div_s);
12851 %}
12852 
12853 // ============================================================================
12854 // Logical Instructions
12855 
12856 // Integer Logical Instructions
12857 
12858 // And Instructions
12859 
12860 
12861 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
12862   match(Set dst (AndI src1 src2));
12863 
12864   format %{ "andw  $dst, $src1, $src2\t# int" %}
12865 
12866   ins_cost(INSN_COST);
12867   ins_encode %{
12868     __ andw(as_Register($dst$$reg),
12869             as_Register($src1$$reg),
12870             as_Register($src2$$reg));
12871   %}
12872 
12873   ins_pipe(ialu_reg_reg);
12874 %}
12875 
12876 instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
12877   match(Set dst (AndI src1 src2));
12878 
12879   format %{ "andsw  $dst, $src1, $src2\t# int" %}
12880 
12881   ins_cost(INSN_COST);
12882   ins_encode %{
12883     __ andw(as_Register($dst$$reg),
12884             as_Register($src1$$reg),
12885             (unsigned long)($src2$$constant));
12886   %}
12887 
12888   ins_pipe(ialu_reg_imm);
12889 %}
12890 
12891 // Or Instructions
12892 
12893 instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
12894   match(Set dst (OrI src1 src2));
12895 
12896   format %{ "orrw  $dst, $src1, $src2\t# int" %}
12897 
12898   ins_cost(INSN_COST);
12899   ins_encode %{
12900     __ orrw(as_Register($dst$$reg),
12901             as_Register($src1$$reg),
12902             as_Register($src2$$reg));
12903   %}
12904 
12905   ins_pipe(ialu_reg_reg);
12906 %}
12907 
12908 instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
12909   match(Set dst (OrI src1 src2));
12910 
12911   format %{ "orrw  $dst, $src1, $src2\t# int" %}
12912 
12913   ins_cost(INSN_COST);
12914   ins_encode %{
12915     __ orrw(as_Register($dst$$reg),
12916             as_Register($src1$$reg),
12917             (unsigned long)($src2$$constant));
12918   %}
12919 
12920   ins_pipe(ialu_reg_imm);
12921 %}
12922 
12923 // Xor Instructions
12924 
12925 instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
12926   match(Set dst (XorI src1 src2));
12927 
12928   format %{ "eorw  $dst, $src1, $src2\t# int" %}
12929 
12930   ins_cost(INSN_COST);
12931   ins_encode %{
12932     __ eorw(as_Register($dst$$reg),
12933             as_Register($src1$$reg),
12934             as_Register($src2$$reg));
12935   %}
12936 
12937   ins_pipe(ialu_reg_reg);
12938 %}
12939 
12940 instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
12941   match(Set dst (XorI src1 src2));
12942 
12943   format %{ "eorw  $dst, $src1, $src2\t# int" %}
12944 
12945   ins_cost(INSN_COST);
12946   ins_encode %{
12947     __ eorw(as_Register($dst$$reg),
12948             as_Register($src1$$reg),
12949             (unsigned long)($src2$$constant));
12950   %}
12951 
12952   ins_pipe(ialu_reg_imm);
12953 %}
12954 
12955 // Long Logical Instructions
12956 // TODO
12957 
12958 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
12959   match(Set dst (AndL src1 src2));
12960 
  format %{ "and  $dst, $src1, $src2\t# long" %}
12962 
12963   ins_cost(INSN_COST);
12964   ins_encode %{
12965     __ andr(as_Register($dst$$reg),
12966             as_Register($src1$$reg),
12967             as_Register($src2$$reg));
12968   %}
12969 
12970   ins_pipe(ialu_reg_reg);
12971 %}
12972 
12973 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
12974   match(Set dst (AndL src1 src2));
12975 
  format %{ "and  $dst, $src1, $src2\t# long" %}
12977 
12978   ins_cost(INSN_COST);
12979   ins_encode %{
12980     __ andr(as_Register($dst$$reg),
12981             as_Register($src1$$reg),
12982             (unsigned long)($src2$$constant));
12983   %}
12984 
12985   ins_pipe(ialu_reg_imm);
12986 %}
12987 
12988 // Or Instructions
12989 
12990 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12991   match(Set dst (OrL src1 src2));
12992 
  format %{ "orr  $dst, $src1, $src2\t# long" %}
12994 
12995   ins_cost(INSN_COST);
12996   ins_encode %{
12997     __ orr(as_Register($dst$$reg),
12998            as_Register($src1$$reg),
12999            as_Register($src2$$reg));
13000   %}
13001 
13002   ins_pipe(ialu_reg_reg);
13003 %}
13004 
13005 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
13006   match(Set dst (OrL src1 src2));
13007 
  format %{ "orr  $dst, $src1, $src2\t# long" %}
13009 
13010   ins_cost(INSN_COST);
13011   ins_encode %{
13012     __ orr(as_Register($dst$$reg),
13013            as_Register($src1$$reg),
13014            (unsigned long)($src2$$constant));
13015   %}
13016 
13017   ins_pipe(ialu_reg_imm);
13018 %}
13019 
13020 // Xor Instructions
13021 
13022 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
13023   match(Set dst (XorL src1 src2));
13024 
  format %{ "eor  $dst, $src1, $src2\t# long" %}
13026 
13027   ins_cost(INSN_COST);
13028   ins_encode %{
13029     __ eor(as_Register($dst$$reg),
13030            as_Register($src1$$reg),
13031            as_Register($src2$$reg));
13032   %}
13033 
13034   ins_pipe(ialu_reg_reg);
13035 %}
13036 
13037 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
13038   match(Set dst (XorL src1 src2));
13039 
13040   ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}
13042 
13043   ins_encode %{
13044     __ eor(as_Register($dst$$reg),
13045            as_Register($src1$$reg),
13046            (unsigned long)($src2$$constant));
13047   %}
13048 
13049   ins_pipe(ialu_reg_imm);
13050 %}
13051 
13052 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
13053 %{
13054   match(Set dst (ConvI2L src));
13055 
13056   ins_cost(INSN_COST);
13057   format %{ "sxtw  $dst, $src\t# i2l" %}
13058   ins_encode %{
13059     __ sbfm($dst$$Register, $src$$Register, 0, 31);
13060   %}
13061   ins_pipe(ialu_reg_shift);
13062 %}
13063 
// This pattern occurs in bigmath arithmetic (zero-extending an int to a long).
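// For example (illustrative): x & 0xFFFFFFFFL, with int x, reaches the
// matcher as AndL (ConvI2L x) 0xFFFFFFFF and is emitted as the single
// zero-extension
//   ubfm  dst, src, #0, #31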
13065 instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
13066 %{
13067   match(Set dst (AndL (ConvI2L src) mask));
13068 
13069   ins_cost(INSN_COST);
13070   format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
13071   ins_encode %{
13072     __ ubfm($dst$$Register, $src$$Register, 0, 31);
13073   %}
13074 
13075   ins_pipe(ialu_reg_shift);
13076 %}
13077 
13078 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
13079   match(Set dst (ConvL2I src));
13080 
13081   ins_cost(INSN_COST);
13082   format %{ "movw  $dst, $src \t// l2i" %}
13083 
13084   ins_encode %{
13085     __ movw(as_Register($dst$$reg), as_Register($src$$reg));
13086   %}
13087 
13088   ins_pipe(ialu_reg);
13089 %}
13090 
13091 instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
13092 %{
13093   match(Set dst (Conv2B src));
13094   effect(KILL cr);
13095 
13096   format %{
13097     "cmpw $src, zr\n\t"
13098     "cset $dst, ne"
13099   %}
13100 
13101   ins_encode %{
13102     __ cmpw(as_Register($src$$reg), zr);
13103     __ cset(as_Register($dst$$reg), Assembler::NE);
13104   %}
13105 
13106   ins_pipe(ialu_reg);
13107 %}
13108 
13109 instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
13110 %{
13111   match(Set dst (Conv2B src));
13112   effect(KILL cr);
13113 
13114   format %{
13115     "cmp  $src, zr\n\t"
13116     "cset $dst, ne"
13117   %}
13118 
13119   ins_encode %{
13120     __ cmp(as_Register($src$$reg), zr);
13121     __ cset(as_Register($dst$$reg), Assembler::NE);
13122   %}
13123 
13124   ins_pipe(ialu_reg);
13125 %}
13126 
13127 instruct convD2F_reg(vRegF dst, vRegD src) %{
13128   match(Set dst (ConvD2F src));
13129 
13130   ins_cost(INSN_COST * 5);
13131   format %{ "fcvtd  $dst, $src \t// d2f" %}
13132 
13133   ins_encode %{
13134     __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
13135   %}
13136 
13137   ins_pipe(fp_d2f);
13138 %}
13139 
13140 instruct convF2D_reg(vRegD dst, vRegF src) %{
13141   match(Set dst (ConvF2D src));
13142 
13143   ins_cost(INSN_COST * 5);
13144   format %{ "fcvts  $dst, $src \t// f2d" %}
13145 
13146   ins_encode %{
13147     __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
13148   %}
13149 
13150   ins_pipe(fp_f2d);
13151 %}
13152 
13153 instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
13154   match(Set dst (ConvF2I src));
13155 
13156   ins_cost(INSN_COST * 5);
13157   format %{ "fcvtzsw  $dst, $src \t// f2i" %}
13158 
13159   ins_encode %{
13160     __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13161   %}
13162 
13163   ins_pipe(fp_f2i);
13164 %}
13165 
13166 instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
13167   match(Set dst (ConvF2L src));
13168 
13169   ins_cost(INSN_COST * 5);
13170   format %{ "fcvtzs  $dst, $src \t// f2l" %}
13171 
13172   ins_encode %{
13173     __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13174   %}
13175 
13176   ins_pipe(fp_f2l);
13177 %}
13178 
13179 instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
13180   match(Set dst (ConvI2F src));
13181 
13182   ins_cost(INSN_COST * 5);
13183   format %{ "scvtfws  $dst, $src \t// i2f" %}
13184 
13185   ins_encode %{
13186     __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13187   %}
13188 
13189   ins_pipe(fp_i2f);
13190 %}
13191 
13192 instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
13193   match(Set dst (ConvL2F src));
13194 
13195   ins_cost(INSN_COST * 5);
13196   format %{ "scvtfs  $dst, $src \t// l2f" %}
13197 
13198   ins_encode %{
13199     __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13200   %}
13201 
13202   ins_pipe(fp_l2f);
13203 %}
13204 
13205 instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
13206   match(Set dst (ConvD2I src));
13207 
13208   ins_cost(INSN_COST * 5);
13209   format %{ "fcvtzdw  $dst, $src \t// d2i" %}
13210 
13211   ins_encode %{
13212     __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13213   %}
13214 
13215   ins_pipe(fp_d2i);
13216 %}
13217 
13218 instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
13219   match(Set dst (ConvD2L src));
13220 
13221   ins_cost(INSN_COST * 5);
13222   format %{ "fcvtzd  $dst, $src \t// d2l" %}
13223 
13224   ins_encode %{
13225     __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13226   %}
13227 
13228   ins_pipe(fp_d2l);
13229 %}
13230 
13231 instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
13232   match(Set dst (ConvI2D src));
13233 
13234   ins_cost(INSN_COST * 5);
13235   format %{ "scvtfwd  $dst, $src \t// i2d" %}
13236 
13237   ins_encode %{
13238     __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13239   %}
13240 
13241   ins_pipe(fp_i2d);
13242 %}
13243 
13244 instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
13245   match(Set dst (ConvL2D src));
13246 
13247   ins_cost(INSN_COST * 5);
13248   format %{ "scvtfd  $dst, $src \t// l2d" %}
13249 
13250   ins_encode %{
13251     __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13252   %}
13253 
13254   ins_pipe(fp_l2d);
13255 %}
13256 
13257 // stack <-> reg and reg <-> reg shuffles with no conversion
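// These implement raw bit-pattern moves such as Float.floatToRawIntBits
// and Double.longBitsToDouble: no conversion is performed, the value
// either crosses register files with an fmov or goes via a spilled
// stack slot.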
13258 
13259 instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
13260 
13261   match(Set dst (MoveF2I src));
13262 
13263   effect(DEF dst, USE src);
13264 
13265   ins_cost(4 * INSN_COST);
13266 
13267   format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}
13268 
13269   ins_encode %{
13270     __ ldrw($dst$$Register, Address(sp, $src$$disp));
13271   %}
13272 
13273   ins_pipe(iload_reg_reg);
13274 
13275 %}
13276 
13277 instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{
13278 
13279   match(Set dst (MoveI2F src));
13280 
13281   effect(DEF dst, USE src);
13282 
13283   ins_cost(4 * INSN_COST);
13284 
13285   format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}
13286 
13287   ins_encode %{
13288     __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
13289   %}
13290 
13291   ins_pipe(pipe_class_memory);
13292 
13293 %}
13294 
13295 instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
13296 
13297   match(Set dst (MoveD2L src));
13298 
13299   effect(DEF dst, USE src);
13300 
13301   ins_cost(4 * INSN_COST);
13302 
13303   format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}
13304 
13305   ins_encode %{
13306     __ ldr($dst$$Register, Address(sp, $src$$disp));
13307   %}
13308 
13309   ins_pipe(iload_reg_reg);
13310 
13311 %}
13312 
13313 instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{
13314 
13315   match(Set dst (MoveL2D src));
13316 
13317   effect(DEF dst, USE src);
13318 
13319   ins_cost(4 * INSN_COST);
13320 
13321   format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}
13322 
13323   ins_encode %{
13324     __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
13325   %}
13326 
13327   ins_pipe(pipe_class_memory);
13328 
13329 %}
13330 
13331 instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{
13332 
13333   match(Set dst (MoveF2I src));
13334 
13335   effect(DEF dst, USE src);
13336 
13337   ins_cost(INSN_COST);
13338 
13339   format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}
13340 
13341   ins_encode %{
13342     __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
13343   %}
13344 
13345   ins_pipe(pipe_class_memory);
13346 
13347 %}
13348 
13349 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
13350 
13351   match(Set dst (MoveI2F src));
13352 
13353   effect(DEF dst, USE src);
13354 
13355   ins_cost(INSN_COST);
13356 
13357   format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}
13358 
13359   ins_encode %{
13360     __ strw($src$$Register, Address(sp, $dst$$disp));
13361   %}
13362 
13363   ins_pipe(istore_reg_reg);
13364 
13365 %}
13366 
13367 instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{
13368 
13369   match(Set dst (MoveD2L src));
13370 
13371   effect(DEF dst, USE src);
13372 
13373   ins_cost(INSN_COST);
13374 
13375   format %{ "strd $dst, $src\t# MoveD2L_reg_stack" %}
13376 
13377   ins_encode %{
13378     __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
13379   %}
13380 
13381   ins_pipe(pipe_class_memory);
13382 
13383 %}
13384 
13385 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
13386 
13387   match(Set dst (MoveL2D src));
13388 
13389   effect(DEF dst, USE src);
13390 
13391   ins_cost(INSN_COST);
13392 
13393   format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}
13394 
13395   ins_encode %{
13396     __ str($src$$Register, Address(sp, $dst$$disp));
13397   %}
13398 
13399   ins_pipe(istore_reg_reg);
13400 
13401 %}
13402 
13403 instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{
13404 
13405   match(Set dst (MoveF2I src));
13406 
13407   effect(DEF dst, USE src);
13408 
13409   ins_cost(INSN_COST);
13410 
13411   format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}
13412 
13413   ins_encode %{
13414     __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
13415   %}
13416 
13417   ins_pipe(fp_f2i);
13418 
13419 %}
13420 
13421 instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{
13422 
13423   match(Set dst (MoveI2F src));
13424 
13425   effect(DEF dst, USE src);
13426 
13427   ins_cost(INSN_COST);
13428 
13429   format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}
13430 
13431   ins_encode %{
13432     __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
13433   %}
13434 
13435   ins_pipe(fp_i2f);
13436 
13437 %}
13438 
13439 instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
13440 
13441   match(Set dst (MoveD2L src));
13442 
13443   effect(DEF dst, USE src);
13444 
13445   ins_cost(INSN_COST);
13446 
13447   format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}
13448 
13449   ins_encode %{
13450     __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
13451   %}
13452 
13453   ins_pipe(fp_d2l);
13454 
13455 %}
13456 
13457 instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{
13458 
13459   match(Set dst (MoveL2D src));
13460 
13461   effect(DEF dst, USE src);
13462 
13463   ins_cost(INSN_COST);
13464 
13465   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
13466 
13467   ins_encode %{
13468     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
13469   %}
13470 
13471   ins_pipe(fp_l2d);
13472 
13473 %}
13474 
13475 // ============================================================================
13476 // clearing of an array
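// Both forms defer to MacroAssembler::zero_words, which stores zeroes
// over $cnt words starting at $base; the input registers are clobbered
// in the process, hence the USE_KILL/TEMP effects.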
13477 
13478 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13479 %{
13480   match(Set dummy (ClearArray cnt base));
13481   effect(USE_KILL cnt, USE_KILL base);
13482 
13483   ins_cost(4 * INSN_COST);
13484   format %{ "ClearArray $cnt, $base" %}
13485 
13486   ins_encode %{
13487     __ zero_words($base$$Register, $cnt$$Register);
13488   %}
13489 
13490   ins_pipe(pipe_class_memory);
13491 %}
13492 
13493 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 tmp, Universe dummy, rFlagsReg cr)
13494 %{
13495   match(Set dummy (ClearArray cnt base));
13496   effect(USE_KILL base, TEMP tmp);
13497 
13498   ins_cost(4 * INSN_COST);
13499   format %{ "ClearArray $cnt, $base" %}
13500 
13501   ins_encode %{
13502     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
13503   %}
13504 
13505   ins_pipe(pipe_class_memory);
13506 %}
13507 
13508 // ============================================================================
13509 // Overflow Math Instructions
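// These nodes are produced for intrinsics such as Math.addExact.  For
// the add forms (illustrative): cmnw op1, op2 is addsw zr, op1, op2;
// it computes op1 + op2, discards the sum and keeps the flags, so V is
// set exactly when the signed 32-bit addition overflows.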
13510 
13511 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13512 %{
13513   match(Set cr (OverflowAddI op1 op2));
13514 
13515   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13516   ins_cost(INSN_COST);
13517   ins_encode %{
13518     __ cmnw($op1$$Register, $op2$$Register);
13519   %}
13520 
13521   ins_pipe(icmp_reg_reg);
13522 %}
13523 
13524 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13525 %{
13526   match(Set cr (OverflowAddI op1 op2));
13527 
13528   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13529   ins_cost(INSN_COST);
13530   ins_encode %{
13531     __ cmnw($op1$$Register, $op2$$constant);
13532   %}
13533 
13534   ins_pipe(icmp_reg_imm);
13535 %}
13536 
13537 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13538 %{
13539   match(Set cr (OverflowAddL op1 op2));
13540 
13541   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13542   ins_cost(INSN_COST);
13543   ins_encode %{
13544     __ cmn($op1$$Register, $op2$$Register);
13545   %}
13546 
13547   ins_pipe(icmp_reg_reg);
13548 %}
13549 
13550 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13551 %{
13552   match(Set cr (OverflowAddL op1 op2));
13553 
13554   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13555   ins_cost(INSN_COST);
13556   ins_encode %{
13557     __ cmn($op1$$Register, $op2$$constant);
13558   %}
13559 
13560   ins_pipe(icmp_reg_imm);
13561 %}
13562 
13563 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13564 %{
13565   match(Set cr (OverflowSubI op1 op2));
13566 
13567   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13568   ins_cost(INSN_COST);
13569   ins_encode %{
13570     __ cmpw($op1$$Register, $op2$$Register);
13571   %}
13572 
13573   ins_pipe(icmp_reg_reg);
13574 %}
13575 
13576 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13577 %{
13578   match(Set cr (OverflowSubI op1 op2));
13579 
13580   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13581   ins_cost(INSN_COST);
13582   ins_encode %{
13583     __ cmpw($op1$$Register, $op2$$constant);
13584   %}
13585 
13586   ins_pipe(icmp_reg_imm);
13587 %}
13588 
13589 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13590 %{
13591   match(Set cr (OverflowSubL op1 op2));
13592 
13593   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13594   ins_cost(INSN_COST);
13595   ins_encode %{
13596     __ cmp($op1$$Register, $op2$$Register);
13597   %}
13598 
13599   ins_pipe(icmp_reg_reg);
13600 %}
13601 
13602 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13603 %{
13604   match(Set cr (OverflowSubL op1 op2));
13605 
13606   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13607   ins_cost(INSN_COST);
13608   ins_encode %{
13609     __ cmp($op1$$Register, $op2$$constant);
13610   %}
13611 
13612   ins_pipe(icmp_reg_imm);
13613 %}
13614 
13615 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13616 %{
13617   match(Set cr (OverflowSubI zero op1));
13618 
13619   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13620   ins_cost(INSN_COST);
13621   ins_encode %{
13622     __ cmpw(zr, $op1$$Register);
13623   %}
13624 
13625   ins_pipe(icmp_reg_imm);
13626 %}
13627 
13628 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13629 %{
13630   match(Set cr (OverflowSubL zero op1));
13631 
13632   format %{ "cmp   zr, $op1\t# overflow check long" %}
13633   ins_cost(INSN_COST);
13634   ins_encode %{
13635     __ cmp(zr, $op1$$Register);
13636   %}
13637 
13638   ins_pipe(icmp_reg_imm);
13639 %}
13640 
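// Multiplication has no flag-setting form, so overflow is checked by
// widening (illustrative summary): smull computes the exact 64-bit
// product of the two 32-bit operands, and that product overflows an
// int exactly when it differs from the sign-extension of its own low
// word.  The movw/cselw/cmpw tail converts the resulting NE condition
// into the V flag that OverflowMulI consumers test.  The long variant
// below plays the same game with mul/smulh.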
13641 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13642 %{
13643   match(Set cr (OverflowMulI op1 op2));
13644 
13645   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13646             "cmp   rscratch1, rscratch1, sxtw\n\t"
13647             "movw  rscratch1, #0x80000000\n\t"
13648             "cselw rscratch1, rscratch1, zr, NE\n\t"
13649             "cmpw  rscratch1, #1" %}
13650   ins_cost(5 * INSN_COST);
13651   ins_encode %{
13652     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13653     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13654     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13655     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13656     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13657   %}
13658 
13659   ins_pipe(pipe_slow);
13660 %}
13661 
13662 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
13663 %{
13664   match(If cmp (OverflowMulI op1 op2));
13665   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13666             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13667   effect(USE labl, KILL cr);
13668 
13669   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13670             "cmp   rscratch1, rscratch1, sxtw\n\t"
13671             "b$cmp   $labl" %}
13672   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
13673   ins_encode %{
13674     Label* L = $labl$$label;
13675     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13676     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13677     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13678     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13679   %}
13680 
13681   ins_pipe(pipe_serial);
13682 %}
13683 
13684 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13685 %{
13686   match(Set cr (OverflowMulL op1 op2));
13687 
  format %{ "mul   rscratch1, $op1, $op2\t# overflow check long\n\t"
13689             "smulh rscratch2, $op1, $op2\n\t"
13690             "cmp   rscratch2, rscratch1, ASR #31\n\t"
13691             "movw  rscratch1, #0x80000000\n\t"
13692             "cselw rscratch1, rscratch1, zr, NE\n\t"
13693             "cmpw  rscratch1, #1" %}
13694   ins_cost(6 * INSN_COST);
13695   ins_encode %{
13696     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13697     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13698     __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
13699     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13700     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13701     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13702   %}
13703 
13704   ins_pipe(pipe_slow);
13705 %}
13706 
13707 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
13708 %{
13709   match(If cmp (OverflowMulL op1 op2));
13710   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13711             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13712   effect(USE labl, KILL cr);
13713 
  format %{ "mul   rscratch1, $op1, $op2\t# overflow check long\n\t"
13715             "smulh rscratch2, $op1, $op2\n\t"
13716             "cmp   rscratch2, rscratch1, ASR #31\n\t"
13717             "b$cmp $labl" %}
13718   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
13719   ins_encode %{
13720     Label* L = $labl$$label;
13721     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13722     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13723     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13724     __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
13725     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13726   %}
13727 
13728   ins_pipe(pipe_serial);
13729 %}
13730 
13731 // ============================================================================
13732 // Compare Instructions
13733 
13734 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
13735 %{
13736   match(Set cr (CmpI op1 op2));
13737 
13738   effect(DEF cr, USE op1, USE op2);
13739 
13740   ins_cost(INSN_COST);
13741   format %{ "cmpw  $op1, $op2" %}
13742 
13743   ins_encode(aarch64_enc_cmpw(op1, op2));
13744 
13745   ins_pipe(icmp_reg_reg);
13746 %}
13747 
13748 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
13749 %{
13750   match(Set cr (CmpI op1 zero));
13751 
13752   effect(DEF cr, USE op1);
13753 
13754   ins_cost(INSN_COST);
13755   format %{ "cmpw $op1, 0" %}
13756 
13757   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13758 
13759   ins_pipe(icmp_reg_imm);
13760 %}
13761 
13762 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
13763 %{
13764   match(Set cr (CmpI op1 op2));
13765 
13766   effect(DEF cr, USE op1);
13767 
13768   ins_cost(INSN_COST);
13769   format %{ "cmpw  $op1, $op2" %}
13770 
13771   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13772 
13773   ins_pipe(icmp_reg_imm);
13774 %}
13775 
13776 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
13777 %{
13778   match(Set cr (CmpI op1 op2));
13779 
13780   effect(DEF cr, USE op1);
13781 
13782   ins_cost(INSN_COST * 2);
13783   format %{ "cmpw  $op1, $op2" %}
13784 
13785   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13786 
13787   ins_pipe(icmp_reg_imm);
13788 %}
13789 
// Unsigned compare instructions; really the same as signed compare,
// except the result should only be used to feed an If or a CMovI
// which takes a cmpOpU.
13793 
13794 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
13795 %{
13796   match(Set cr (CmpU op1 op2));
13797 
13798   effect(DEF cr, USE op1, USE op2);
13799 
13800   ins_cost(INSN_COST);
13801   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13802 
13803   ins_encode(aarch64_enc_cmpw(op1, op2));
13804 
13805   ins_pipe(icmp_reg_reg);
13806 %}
13807 
13808 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
13809 %{
13810   match(Set cr (CmpU op1 zero));
13811 
13812   effect(DEF cr, USE op1);
13813 
13814   ins_cost(INSN_COST);
13815   format %{ "cmpw $op1, #0\t# unsigned" %}
13816 
13817   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13818 
13819   ins_pipe(icmp_reg_imm);
13820 %}
13821 
13822 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
13823 %{
13824   match(Set cr (CmpU op1 op2));
13825 
13826   effect(DEF cr, USE op1);
13827 
13828   ins_cost(INSN_COST);
13829   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13830 
13831   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13832 
13833   ins_pipe(icmp_reg_imm);
13834 %}
13835 
13836 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
13837 %{
13838   match(Set cr (CmpU op1 op2));
13839 
13840   effect(DEF cr, USE op1);
13841 
13842   ins_cost(INSN_COST * 2);
13843   format %{ "cmpw  $op1, $op2\t# unsigned" %}
13844 
13845   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13846 
13847   ins_pipe(icmp_reg_imm);
13848 %}
13849 
13850 instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13851 %{
13852   match(Set cr (CmpL op1 op2));
13853 
13854   effect(DEF cr, USE op1, USE op2);
13855 
13856   ins_cost(INSN_COST);
13857   format %{ "cmp  $op1, $op2" %}
13858 
13859   ins_encode(aarch64_enc_cmp(op1, op2));
13860 
13861   ins_pipe(icmp_reg_reg);
13862 %}
13863 
13864 instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
13865 %{
13866   match(Set cr (CmpL op1 zero));
13867 
13868   effect(DEF cr, USE op1);
13869 
13870   ins_cost(INSN_COST);
  format %{ "cmp  $op1, #0" %}
13872 
13873   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
13874 
13875   ins_pipe(icmp_reg_imm);
13876 %}
13877 
13878 instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
13879 %{
13880   match(Set cr (CmpL op1 op2));
13881 
13882   effect(DEF cr, USE op1);
13883 
13884   ins_cost(INSN_COST);
13885   format %{ "cmp  $op1, $op2" %}
13886 
13887   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
13888 
13889   ins_pipe(icmp_reg_imm);
13890 %}
13891 
13892 instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
13893 %{
13894   match(Set cr (CmpL op1 op2));
13895 
13896   effect(DEF cr, USE op1);
13897 
13898   ins_cost(INSN_COST * 2);
13899   format %{ "cmp  $op1, $op2" %}
13900 
13901   ins_encode(aarch64_enc_cmp_imm(op1, op2));
13902 
13903   ins_pipe(icmp_reg_imm);
13904 %}
13905 
13906 instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
13907 %{
13908   match(Set cr (CmpP op1 op2));
13909 
13910   effect(DEF cr, USE op1, USE op2);
13911 
13912   ins_cost(INSN_COST);
13913   format %{ "cmp  $op1, $op2\t // ptr" %}
13914 
13915   ins_encode(aarch64_enc_cmpp(op1, op2));
13916 
13917   ins_pipe(icmp_reg_reg);
13918 %}
13919 
13920 instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
13921 %{
13922   match(Set cr (CmpN op1 op2));
13923 
13924   effect(DEF cr, USE op1, USE op2);
13925 
13926   ins_cost(INSN_COST);
13927   format %{ "cmp  $op1, $op2\t // compressed ptr" %}
13928 
13929   ins_encode(aarch64_enc_cmpn(op1, op2));
13930 
13931   ins_pipe(icmp_reg_reg);
13932 %}
13933 
13934 instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
13935 %{
13936   match(Set cr (CmpP op1 zero));
13937 
13938   effect(DEF cr, USE op1, USE zero);
13939 
13940   ins_cost(INSN_COST);
13941   format %{ "cmp  $op1, 0\t // ptr" %}
13942 
13943   ins_encode(aarch64_enc_testp(op1));
13944 
13945   ins_pipe(icmp_reg_imm);
13946 %}
13947 
13948 instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
13949 %{
13950   match(Set cr (CmpN op1 zero));
13951 
13952   effect(DEF cr, USE op1, USE zero);
13953 
13954   ins_cost(INSN_COST);
13955   format %{ "cmp  $op1, 0\t // compressed ptr" %}
13956 
13957   ins_encode(aarch64_enc_testn(op1));
13958 
13959   ins_pipe(icmp_reg_imm);
13960 %}
13961 
13962 // FP comparisons
13963 //
// n.b. CmpF/CmpD set a normal flags reg which is then tested using a
// normal cmpOp. See the declaration of rFlagsReg for details.
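//
// After fcmp the flags read: EQ for equal, LT for less than, GT for
// greater than, and C+V set for unordered, so an unordered result
// satisfies the signed "less than" conditions.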
13966 
13967 instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
13968 %{
13969   match(Set cr (CmpF src1 src2));
13970 
13971   ins_cost(3 * INSN_COST);
13972   format %{ "fcmps $src1, $src2" %}
13973 
13974   ins_encode %{
13975     __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
13976   %}
13977 
13978   ins_pipe(pipe_class_compare);
13979 %}
13980 
13981 instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
13982 %{
13983   match(Set cr (CmpF src1 src2));
13984 
13985   ins_cost(3 * INSN_COST);
13986   format %{ "fcmps $src1, 0.0" %}
13987 
13988   ins_encode %{
13989     __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
13990   %}
13991 
13992   ins_pipe(pipe_class_compare);
13993 %}
13995 
13996 instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
13997 %{
13998   match(Set cr (CmpD src1 src2));
13999 
14000   ins_cost(3 * INSN_COST);
14001   format %{ "fcmpd $src1, $src2" %}
14002 
14003   ins_encode %{
14004     __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
14005   %}
14006 
14007   ins_pipe(pipe_class_compare);
14008 %}
14009 
14010 instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
14011 %{
14012   match(Set cr (CmpD src1 src2));
14013 
14014   ins_cost(3 * INSN_COST);
14015   format %{ "fcmpd $src1, 0.0" %}
14016 
14017   ins_encode %{
14018     __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
14019   %}
14020 
14021   ins_pipe(pipe_class_compare);
14022 %}
14023 
14024 instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
14025 %{
14026   match(Set dst (CmpF3 src1 src2));
14027   effect(KILL cr);
14028 
14029   ins_cost(5 * INSN_COST);
14030   format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
14032             "csnegw($dst, $dst, $dst, lt)"
14033   %}
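  // $dst ends as -1 (less than or unordered), 0 (equal) or +1 (greater
  // than), the ideal three-way compare result.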
14034 
14035   ins_encode %{
14036     Label done;
14037     FloatRegister s1 = as_FloatRegister($src1$$reg);
14038     FloatRegister s2 = as_FloatRegister($src2$$reg);
14039     Register d = as_Register($dst$$reg);
14040     __ fcmps(s1, s2);
14041     // installs 0 if EQ else -1
14042     __ csinvw(d, zr, zr, Assembler::EQ);
14043     // keeps -1 if less or unordered else installs 1
14044     __ csnegw(d, d, d, Assembler::LT);
14045     __ bind(done);
14046   %}
14047 
14048   ins_pipe(pipe_class_default);
14049 
14050 %}
14051 
14052 instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
14053 %{
14054   match(Set dst (CmpD3 src1 src2));
14055   effect(KILL cr);
14056 
14057   ins_cost(5 * INSN_COST);
14058   format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
14060             "csnegw($dst, $dst, $dst, lt)"
14061   %}
14062 
14063   ins_encode %{
14064     Label done;
14065     FloatRegister s1 = as_FloatRegister($src1$$reg);
14066     FloatRegister s2 = as_FloatRegister($src2$$reg);
14067     Register d = as_Register($dst$$reg);
14068     __ fcmpd(s1, s2);
14069     // installs 0 if EQ else -1
14070     __ csinvw(d, zr, zr, Assembler::EQ);
14071     // keeps -1 if less or unordered else installs 1
14072     __ csnegw(d, d, d, Assembler::LT);
14073     __ bind(done);
14074   %}
14075   ins_pipe(pipe_class_default);
14076 
14077 %}
14078 
14079 instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
14080 %{
14081   match(Set dst (CmpF3 src1 zero));
14082   effect(KILL cr);
14083 
14084   ins_cost(5 * INSN_COST);
14085   format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
14087             "csnegw($dst, $dst, $dst, lt)"
14088   %}
14089 
14090   ins_encode %{
14091     Label done;
14092     FloatRegister s1 = as_FloatRegister($src1$$reg);
14093     Register d = as_Register($dst$$reg);
14094     __ fcmps(s1, 0.0D);
14095     // installs 0 if EQ else -1
14096     __ csinvw(d, zr, zr, Assembler::EQ);
14097     // keeps -1 if less or unordered else installs 1
14098     __ csnegw(d, d, d, Assembler::LT);
14099     __ bind(done);
14100   %}
14101 
14102   ins_pipe(pipe_class_default);
14103 
14104 %}
14105 
14106 instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
14107 %{
14108   match(Set dst (CmpD3 src1 zero));
14109   effect(KILL cr);
14110 
14111   ins_cost(5 * INSN_COST);
14112   format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
14114             "csnegw($dst, $dst, $dst, lt)"
14115   %}
14116 
14117   ins_encode %{
14118     Label done;
14119     FloatRegister s1 = as_FloatRegister($src1$$reg);
14120     Register d = as_Register($dst$$reg);
14121     __ fcmpd(s1, 0.0D);
14122     // installs 0 if EQ else -1
14123     __ csinvw(d, zr, zr, Assembler::EQ);
14124     // keeps -1 if less or unordered else installs 1
14125     __ csnegw(d, d, d, Assembler::LT);
14126     __ bind(done);
14127   %}
14128   ins_pipe(pipe_class_default);
14129 
14130 %}
14131 
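// CmpLTMask yields -1 when p < q and 0 otherwise: csetw materialises
// 0 or 1 from the LT condition and subtracting from zr turns 1 into -1.
// Against a zero operand the idiom collapses to an arithmetic shift
// that smears the sign bit across the whole register.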
14132 instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
14133 %{
14134   match(Set dst (CmpLTMask p q));
14135   effect(KILL cr);
14136 
14137   ins_cost(3 * INSN_COST);
14138 
14139   format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
14140             "csetw $dst, lt\n\t"
14141             "subw $dst, zr, $dst"
14142   %}
14143 
14144   ins_encode %{
14145     __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
14146     __ csetw(as_Register($dst$$reg), Assembler::LT);
14147     __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
14148   %}
14149 
14150   ins_pipe(ialu_reg_reg);
14151 %}
14152 
14153 instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
14154 %{
14155   match(Set dst (CmpLTMask src zero));
14156   effect(KILL cr);
14157 
14158   ins_cost(INSN_COST);
14159 
14160   format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}
14161 
14162   ins_encode %{
14163     __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
14164   %}
14165 
14166   ins_pipe(ialu_reg_shift);
14167 %}
14168 
14169 // ============================================================================
14170 // Max and Min
14171 
14172 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
14173 %{
14174   match(Set dst (MinI src1 src2));
14175 
14176   effect(DEF dst, USE src1, USE src2, KILL cr);
14177   size(8);
14178 
14179   ins_cost(INSN_COST * 3);
14180   format %{
    "cmpw  $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, lt"
14183   %}
14184 
14185   ins_encode %{
14186     __ cmpw(as_Register($src1$$reg),
14187             as_Register($src2$$reg));
14188     __ cselw(as_Register($dst$$reg),
14189              as_Register($src1$$reg),
14190              as_Register($src2$$reg),
14191              Assembler::LT);
14192   %}
14193 
14194   ins_pipe(ialu_reg_reg);
14195 %}
14197 
14198 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
14199 %{
14200   match(Set dst (MaxI src1 src2));
14201 
14202   effect(DEF dst, USE src1, USE src2, KILL cr);
14203   size(8);
14204 
14205   ins_cost(INSN_COST * 3);
14206   format %{
    "cmpw  $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, gt"
14209   %}
14210 
14211   ins_encode %{
14212     __ cmpw(as_Register($src1$$reg),
14213             as_Register($src2$$reg));
14214     __ cselw(as_Register($dst$$reg),
14215              as_Register($src1$$reg),
14216              as_Register($src2$$reg),
14217              Assembler::GT);
14218   %}
14219 
14220   ins_pipe(ialu_reg_reg);
14221 %}
14222 
14223 // ============================================================================
14224 // Branch Instructions
14225 
14226 // Direct Branch.
14227 instruct branch(label lbl)
14228 %{
14229   match(Goto);
14230 
14231   effect(USE lbl);
14232 
14233   ins_cost(BRANCH_COST);
14234   format %{ "b  $lbl" %}
14235 
14236   ins_encode(aarch64_enc_b(lbl));
14237 
14238   ins_pipe(pipe_branch);
14239 %}
14240 
14241 // Conditional Near Branch
14242 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
14243 %{
14244   // Same match rule as `branchConFar'.
14245   match(If cmp cr);
14246 
14247   effect(USE lbl);
14248 
14249   ins_cost(BRANCH_COST);
14250   // If set to 1 this indicates that the current instruction is a
14251   // short variant of a long branch. This avoids using this
14252   // instruction in first-pass matching. It will then only be used in
14253   // the `Shorten_branches' pass.
14254   // ins_short_branch(1);
14255   format %{ "b$cmp  $lbl" %}
14256 
14257   ins_encode(aarch64_enc_br_con(cmp, lbl));
14258 
14259   ins_pipe(pipe_branch_cond);
14260 %}
14261 
14262 // Conditional Near Branch Unsigned
14263 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
14264 %{
14265   // Same match rule as `branchConFar'.
14266   match(If cmp cr);
14267 
14268   effect(USE lbl);
14269 
14270   ins_cost(BRANCH_COST);
14271   // If set to 1 this indicates that the current instruction is a
14272   // short variant of a long branch. This avoids using this
14273   // instruction in first-pass matching. It will then only be used in
14274   // the `Shorten_branches' pass.
14275   // ins_short_branch(1);
14276   format %{ "b$cmp  $lbl\t# unsigned" %}
14277 
14278   ins_encode(aarch64_enc_br_conU(cmp, lbl));
14279 
14280   ins_pipe(pipe_branch_cond);
14281 %}
14282 
14283 // Make use of CBZ and CBNZ.  These instructions, as well as being
14284 // shorter than (cmp; branch), have the additional benefit of not
14285 // killing the flags.
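// For example, "cmpw $op1, #0; b.eq L" becomes the single instruction
// "cbzw $op1, L" and leaves NZCV untouched.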
14286 
14287 instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
14288   match(If cmp (CmpI op1 op2));
14289   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
14290             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
14291   effect(USE labl);
14292 
14293   ins_cost(BRANCH_COST);
14294   format %{ "cbw$cmp   $op1, $labl" %}
14295   ins_encode %{
14296     Label* L = $labl$$label;
14297     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14298     if (cond == Assembler::EQ)
14299       __ cbzw($op1$$Register, *L);
14300     else
14301       __ cbnzw($op1$$Register, *L);
14302   %}
14303   ins_pipe(pipe_cmp_branch);
14304 %}
14305 
14306 instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
14307   match(If cmp (CmpL op1 op2));
14308   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
14309             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
14310   effect(USE labl);
14311 
14312   ins_cost(BRANCH_COST);
14313   format %{ "cb$cmp   $op1, $labl" %}
14314   ins_encode %{
14315     Label* L = $labl$$label;
14316     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14317     if (cond == Assembler::EQ)
14318       __ cbz($op1$$Register, *L);
14319     else
14320       __ cbnz($op1$$Register, *L);
14321   %}
14322   ins_pipe(pipe_cmp_branch);
14323 %}
14324 
14325 instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
14326   match(If cmp (CmpP op1 op2));
14327   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
14328             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
14329   effect(USE labl);
14330 
14331   ins_cost(BRANCH_COST);
14332   format %{ "cb$cmp   $op1, $labl" %}
14333   ins_encode %{
14334     Label* L = $labl$$label;
14335     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14336     if (cond == Assembler::EQ)
14337       __ cbz($op1$$Register, *L);
14338     else
14339       __ cbnz($op1$$Register, *L);
14340   %}
14341   ins_pipe(pipe_cmp_branch);
14342 %}
14343 
14344 instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
14345   match(If cmp (CmpN op1 op2));
14346   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
14347             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
14348   effect(USE labl);
14349 
14350   ins_cost(BRANCH_COST);
14351   format %{ "cbw$cmp   $op1, $labl" %}
14352   ins_encode %{
14353     Label* L = $labl$$label;
14354     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14355     if (cond == Assembler::EQ)
14356       __ cbzw($op1$$Register, *L);
14357     else
14358       __ cbnzw($op1$$Register, *L);
14359   %}
14360   ins_pipe(pipe_cmp_branch);
14361 %}
14362 
14363 instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
14364   match(If cmp (CmpP (DecodeN oop) zero));
14365   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
14366             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
14367   effect(USE labl);
14368 
14369   ins_cost(BRANCH_COST);
14370   format %{ "cb$cmp   $oop, $labl" %}
14371   ins_encode %{
14372     Label* L = $labl$$label;
14373     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14374     if (cond == Assembler::EQ)
14375       __ cbzw($oop$$Register, *L);
14376     else
14377       __ cbnzw($oop$$Register, *L);
14378   %}
14379   ins_pipe(pipe_cmp_branch);
14380 %}
14381 
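// For unsigned values x > 0 is the same test as x != 0 and x <= 0 the
// same as x == 0, so HI/LS against zero can also be folded into
// cbnz/cbz below.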
14382 instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
14383   match(If cmp (CmpU op1 op2));
14384   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
14385             || n->in(1)->as_Bool()->_test._test == BoolTest::eq
14386             || n->in(1)->as_Bool()->_test._test == BoolTest::gt
14387             ||  n->in(1)->as_Bool()->_test._test == BoolTest::le);
14388   effect(USE labl);
14389 
14390   ins_cost(BRANCH_COST);
14391   format %{ "cbw$cmp   $op1, $labl" %}
14392   ins_encode %{
14393     Label* L = $labl$$label;
14394     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14395     if (cond == Assembler::EQ || cond == Assembler::LS)
14396       __ cbzw($op1$$Register, *L);
14397     else
14398       __ cbnzw($op1$$Register, *L);
14399   %}
14400   ins_pipe(pipe_cmp_branch);
14401 %}
14402 
14403 instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
14404   match(If cmp (CmpU op1 op2));
14405   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
14406             || n->in(1)->as_Bool()->_test._test == BoolTest::eq
14407             || n->in(1)->as_Bool()->_test._test == BoolTest::gt
14408             || n->in(1)->as_Bool()->_test._test == BoolTest::le);
14409   effect(USE labl);
14410 
14411   ins_cost(BRANCH_COST);
14412   format %{ "cb$cmp   $op1, $labl" %}
14413   ins_encode %{
14414     Label* L = $labl$$label;
14415     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14416     if (cond == Assembler::EQ || cond == Assembler::LS)
14417       __ cbz($op1$$Register, *L);
14418     else
14419       __ cbnz($op1$$Register, *L);
14420   %}
14421   ins_pipe(pipe_cmp_branch);
14422 %}
14423 
14424 // Test bit and Branch
14425 
14426 // Patterns for short (< 32KiB) variants
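// tbz/tbnz encode a 14-bit signed word offset, which gives the
// +/-32KiB reach noted above.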
14427 instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
14428   match(If cmp (CmpL op1 op2));
14429   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
14430             || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
14431   effect(USE labl);
14432 
14433   ins_cost(BRANCH_COST);
14434   format %{ "cb$cmp   $op1, $labl # long" %}
14435   ins_encode %{
14436     Label* L = $labl$$label;
14437     Assembler::Condition cond =
14438       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14439     __ tbr(cond, $op1$$Register, 63, *L);
14440   %}
14441   ins_pipe(pipe_cmp_branch);
14442   ins_short_branch(1);
14443 %}
14444 
14445 instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
14446   match(If cmp (CmpI op1 op2));
14447   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
14448             || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
14449   effect(USE labl);
14450 
14451   ins_cost(BRANCH_COST);
14452   format %{ "cb$cmp   $op1, $labl # int" %}
14453   ins_encode %{
14454     Label* L = $labl$$label;
14455     Assembler::Condition cond =
14456       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14457     __ tbr(cond, $op1$$Register, 31, *L);
14458   %}
14459   ins_pipe(pipe_cmp_branch);
14460   ins_short_branch(1);
14461 %}
14462 
14463 instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
14464   match(If cmp (CmpL (AndL op1 op2) op3));
14465   predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
14466             || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
14467             && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
14468   effect(USE labl);
14469 
14470   ins_cost(BRANCH_COST);
14471   format %{ "tb$cmp   $op1, $op2, $labl" %}
14472   ins_encode %{
14473     Label* L = $labl$$label;
14474     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14475     int bit = exact_log2($op2$$constant);
14476     __ tbr(cond, $op1$$Register, bit, *L);
14477   %}
14478   ins_pipe(pipe_cmp_branch);
14479   ins_short_branch(1);
14480 %}
14481 
14482 instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
14483   match(If cmp (CmpI (AndI op1 op2) op3));
14484   predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
14485             || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
14486             && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
14487   effect(USE labl);
14488 
14489   ins_cost(BRANCH_COST);
14490   format %{ "tb$cmp   $op1, $op2, $labl" %}
14491   ins_encode %{
14492     Label* L = $labl$$label;
14493     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14494     int bit = exact_log2($op2$$constant);
14495     __ tbr(cond, $op1$$Register, bit, *L);
14496   %}
14497   ins_pipe(pipe_cmp_branch);
14498   ins_short_branch(1);
14499 %}
14500 
14501 // And far variants
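// The far forms pass /*far*/true to MacroAssembler::tbr, leaving it
// free to expand the test into an inverted tbz/tbnz over an
// unconditional branch when the target may lie out of tbz range.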
14502 instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
14503   match(If cmp (CmpL op1 op2));
14504   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
14505             || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
14506   effect(USE labl);
14507 
14508   ins_cost(BRANCH_COST);
14509   format %{ "cb$cmp   $op1, $labl # long" %}
14510   ins_encode %{
14511     Label* L = $labl$$label;
14512     Assembler::Condition cond =
14513       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14514     __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
14515   %}
14516   ins_pipe(pipe_cmp_branch);
14517 %}
14518 
14519 instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
14520   match(If cmp (CmpI op1 op2));
14521   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
14522             || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
14523   effect(USE labl);
14524 
14525   ins_cost(BRANCH_COST);
14526   format %{ "cb$cmp   $op1, $labl # int" %}
14527   ins_encode %{
14528     Label* L = $labl$$label;
14529     Assembler::Condition cond =
14530       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14531     __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
14532   %}
14533   ins_pipe(pipe_cmp_branch);
14534 %}
14535 
14536 instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
14537   match(If cmp (CmpL (AndL op1 op2) op3));
14538   predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
14539             || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
14540             && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
14541   effect(USE labl);
14542 
14543   ins_cost(BRANCH_COST);
14544   format %{ "tb$cmp   $op1, $op2, $labl" %}
14545   ins_encode %{
14546     Label* L = $labl$$label;
14547     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14548     int bit = exact_log2($op2$$constant);
14549     __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
14550   %}
14551   ins_pipe(pipe_cmp_branch);
14552 %}
14553 
14554 instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
14555   match(If cmp (CmpI (AndI op1 op2) op3));
14556   predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
14557             || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
14558             && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
14559   effect(USE labl);
14560 
14561   ins_cost(BRANCH_COST);
14562   format %{ "tb$cmp   $op1, $op2, $labl" %}
14563   ins_encode %{
14564     Label* L = $labl$$label;
14565     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14566     int bit = exact_log2($op2$$constant);
14567     __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
14568   %}
14569   ins_pipe(pipe_cmp_branch);
14570 %}
14571 
14572 // Test bits
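// n.b. tst is the flag-setting AND with the zero register as
// destination (ANDS zr, ..), so these patterns tie up no result
// register.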
14573 
14574 instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
14575   match(Set cr (CmpL (AndL op1 op2) op3));
14576   predicate(Assembler::operand_valid_for_logical_immediate
14577             (/*is_32*/false, n->in(1)->in(2)->get_long()));
14578 
14579   ins_cost(INSN_COST);
14580   format %{ "tst $op1, $op2 # long" %}
14581   ins_encode %{
14582     __ tst($op1$$Register, $op2$$constant);
14583   %}
14584   ins_pipe(ialu_reg_reg);
14585 %}
14586 
14587 instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
14588   match(Set cr (CmpI (AndI op1 op2) op3));
14589   predicate(Assembler::operand_valid_for_logical_immediate
14590             (/*is_32*/true, n->in(1)->in(2)->get_int()));
14591 
14592   ins_cost(INSN_COST);
14593   format %{ "tst $op1, $op2 # int" %}
14594   ins_encode %{
14595     __ tstw($op1$$Register, $op2$$constant);
14596   %}
14597   ins_pipe(ialu_reg_reg);
14598 %}
14599 
14600 instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
14601   match(Set cr (CmpL (AndL op1 op2) op3));
14602 
14603   ins_cost(INSN_COST);
14604   format %{ "tst $op1, $op2 # long" %}
14605   ins_encode %{
14606     __ tst($op1$$Register, $op2$$Register);
14607   %}
14608   ins_pipe(ialu_reg_reg);
14609 %}
14610 
14611 instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
14612   match(Set cr (CmpI (AndI op1 op2) op3));
14613 
14614   ins_cost(INSN_COST);
14615   format %{ "tstw $op1, $op2 # int" %}
14616   ins_encode %{
14617     __ tstw($op1$$Register, $op2$$Register);
14618   %}
14619   ins_pipe(ialu_reg_reg);
14620 %}
14621 
14622 
14623 // Conditional Far Branch
14624 // Conditional Far Branch Unsigned
14625 // TODO: fixme
14626 
14627 // counted loop end branch near
14628 instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
14629 %{
14630   match(CountedLoopEnd cmp cr);
14631 
14632   effect(USE lbl);
14633 
14634   ins_cost(BRANCH_COST);
14635   // short variant.
14636   // ins_short_branch(1);
14637   format %{ "b$cmp $lbl \t// counted loop end" %}
14638 
14639   ins_encode(aarch64_enc_br_con(cmp, lbl));
14640 
14641   ins_pipe(pipe_branch);
14642 %}
14643 
14644 // counted loop end branch near Unsigned
14645 instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
14646 %{
14647   match(CountedLoopEnd cmp cr);
14648 
14649   effect(USE lbl);
14650 
14651   ins_cost(BRANCH_COST);
14652   // short variant.
14653   // ins_short_branch(1);
14654   format %{ "b$cmp $lbl \t// counted loop end unsigned" %}
14655 
14656   ins_encode(aarch64_enc_br_conU(cmp, lbl));
14657 
14658   ins_pipe(pipe_branch);
14659 %}
14660 
14661 // counted loop end branch far
14662 // counted loop end branch far unsigned
14663 // TODO: fixme
14664 
14665 // ============================================================================
14666 // inlined locking and unlocking
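// n.b. the locking encodings below are expected to leave the flags EQ
// when the fast path succeeds and NE when the slow path must be taken.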
14667 
14668 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
14669 %{
14670   match(Set cr (FastLock object box));
14671   effect(TEMP tmp, TEMP tmp2);
14672 
14673   // TODO
14674   // identify correct cost
14675   ins_cost(5 * INSN_COST);
14676   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
14677 
14678   ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));
14679 
14680   ins_pipe(pipe_serial);
14681 %}
14682 
14683 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
14684 %{
14685   match(Set cr (FastUnlock object box));
14686   effect(TEMP tmp, TEMP tmp2);
14687 
14688   ins_cost(5 * INSN_COST);
14689   format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
14690 
14691   ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));
14692 
14693   ins_pipe(pipe_serial);
14694 %}
14695 
14696 
14697 // ============================================================================
14698 // Safepoint Instructions
14699 
14700 // TODO
14701 // provide a near and far version of this code
14702 
14703 instruct safePoint(iRegP poll)
14704 %{
14705   match(SafePoint poll);
14706 
14707   format %{
14708     "ldrw zr, [$poll]\t# Safepoint: poll for GC"
14709   %}
14710   ins_encode %{
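    // A load from the polling page is harmless while the page is
    // readable but faults once the VM protects it, diverting this
    // thread into the safepoint handler.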
14711     __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
14712   %}
14713   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
14714 %}
14715 
14716 
14717 // ============================================================================
14718 // Procedure Call/Return Instructions
14719 
14720 // Call Java Static Instruction
14721 
14722 instruct CallStaticJavaDirect(method meth)
14723 %{
14724   match(CallStaticJava);
14725 
14726   effect(USE meth);
14727 
14728   ins_cost(CALL_COST);
14729 
14730   format %{ "call,static $meth \t// ==> " %}
14731 
14732   ins_encode( aarch64_enc_java_static_call(meth),
14733               aarch64_enc_call_epilog );
14734 
14735   ins_pipe(pipe_class_call);
14736 %}
14737 
14739 
14740 // Call Java Dynamic Instruction
14741 instruct CallDynamicJavaDirect(method meth)
14742 %{
14743   match(CallDynamicJava);
14744 
14745   effect(USE meth);
14746 
14747   ins_cost(CALL_COST);
14748 
14749   format %{ "CALL,dynamic $meth \t// ==> " %}
14750 
14751   ins_encode( aarch64_enc_java_dynamic_call(meth),
14752                aarch64_enc_call_epilog );
14753 
14754   ins_pipe(pipe_class_call);
14755 %}
14756 
14757 // Call Runtime Instruction
14758 
14759 instruct CallRuntimeDirect(method meth)
14760 %{
14761   match(CallRuntime);
14762 
14763   effect(USE meth);
14764 
14765   ins_cost(CALL_COST);
14766 
14767   format %{ "CALL, runtime $meth" %}
14768 
14769   ins_encode( aarch64_enc_java_to_runtime(meth) );
14770 
14771   ins_pipe(pipe_class_call);
14772 %}
14773 
14774 // Call Runtime Instruction
14775 
14776 instruct CallLeafDirect(method meth)
14777 %{
14778   match(CallLeaf);
14779 
14780   effect(USE meth);
14781 
14782   ins_cost(CALL_COST);
14783 
14784   format %{ "CALL, runtime leaf $meth" %}
14785 
14786   ins_encode( aarch64_enc_java_to_runtime(meth) );
14787 
14788   ins_pipe(pipe_class_call);
14789 %}
14790 
14791 // Call Runtime Instruction
14792 
14793 instruct CallLeafNoFPDirect(method meth)
14794 %{
14795   match(CallLeafNoFP);
14796 
14797   effect(USE meth);
14798 
14799   ins_cost(CALL_COST);
14800 
14801   format %{ "CALL, runtime leaf nofp $meth" %}
14802 
14803   ins_encode( aarch64_enc_java_to_runtime(meth) );
14804 
14805   ins_pipe(pipe_class_call);
14806 %}
14807 
14808 // Tail Call; Jump from runtime stub to Java code.
14809 // Also known as an 'interprocedural jump'.
14810 // Target of jump will eventually return to caller.
14811 // TailJump below removes the return address.
14812 instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
14813 %{
14814   match(TailCall jump_target method_oop);
14815 
14816   ins_cost(CALL_COST);
14817 
14818   format %{ "br $jump_target\t# $method_oop holds method oop" %}
14819 
14820   ins_encode(aarch64_enc_tail_call(jump_target));
14821 
14822   ins_pipe(pipe_class_call);
14823 %}
14824 
14825 instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
14826 %{
14827   match(TailJump jump_target ex_oop);
14828 
14829   ins_cost(CALL_COST);
14830 
14831   format %{ "br $jump_target\t# $ex_oop holds exception oop" %}
14832 
14833   ins_encode(aarch64_enc_tail_jmp(jump_target));
14834 
14835   ins_pipe(pipe_class_call);
14836 %}
14837 
14838 // Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is set up
14840 // just prior to jumping to this handler. No code emitted.
14841 // TODO check
14842 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
14843 instruct CreateException(iRegP_R0 ex_oop)
14844 %{
14845   match(Set ex_oop (CreateEx));
14846 
14847   format %{ " -- \t// exception oop; no code emitted" %}
14848 
14849   size(0);
14850 
14851   ins_encode( /*empty*/ );
14852 
14853   ins_pipe(pipe_class_empty);
14854 %}
14855 
14856 // Rethrow exception: The exception oop will come in the first
14857 // argument position. Then JUMP (not call) to the rethrow stub code.
14858 instruct RethrowException() %{
14859   match(Rethrow);
14860   ins_cost(CALL_COST);
14861 
14862   format %{ "b rethrow_stub" %}
14863 
14864   ins_encode( aarch64_enc_rethrow() );
14865 
14866   ins_pipe(pipe_class_call);
14867 %}
14868 
14869 
14870 // Return Instruction
14871 // epilog node loads ret address into lr as part of frame pop
14872 instruct Ret()
14873 %{
14874   match(Return);
14875 
14876   format %{ "ret\t// return register" %}
14877 
14878   ins_encode( aarch64_enc_ret() );
14879 
14880   ins_pipe(pipe_branch);
14881 %}
14882 
14883 // Die now.
14884 instruct ShouldNotReachHere() %{
14885   match(Halt);
14886 
14887   ins_cost(CALL_COST);
14888   format %{ "ShouldNotReachHere" %}
14889 
14890   ins_encode %{
14891     // TODO
14892     // implement proper trap call here
14893     __ brk(999);
14894   %}
14895 
14896   ins_pipe(pipe_class_default);
14897 %}
14898 
14899 // ============================================================================
14900 // Partial Subtype Check
14901 //
// Scan the sub-klass's secondary supers (superklass) array for an
// instance of the superklass.  Set a hidden internal cache on a hit
// (the cache is checked with exposed code in gen_subtype_check()).
// Return NZ for a miss or zero for a hit.  The encoding ALSO sets
// flags.
14906 
14907 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
14908 %{
14909   match(Set result (PartialSubtypeCheck sub super));
14910   effect(KILL cr, KILL temp);
14911 
14912   ins_cost(1100);  // slightly larger than the next version
14913   format %{ "partialSubtypeCheck $result, $sub, $super" %}
14914 
14915   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14916 
14917   opcode(0x1); // Force zero of result reg on hit
14918 
14919   ins_pipe(pipe_class_memory);
14920 %}
14921 
14922 instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
14923 %{
14924   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
14925   effect(KILL temp, KILL result);
14926 
  ins_cost(1100);  // same cost as the check above
14928   format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}
14929 
14930   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14931 
14932   opcode(0x0); // Don't zero result reg on hit
14933 
14934   ins_pipe(pipe_class_memory);
14935 %}
14936 
14937 instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
14938                         iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
14939 %{
14940   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
14941   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
14942   effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
14943 
14944   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
14945   ins_encode %{
14946     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
14947     __ asrw($cnt1$$Register, $cnt1$$Register, 1);
14948     __ asrw($cnt2$$Register, $cnt2$$Register, 1);
14949     __ string_compare($str1$$Register, $str2$$Register,
14950                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
14951                       $tmp1$$Register);
14952   %}
14953   ins_pipe(pipe_class_memory);
14954 %}
14955 
14956 instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
14957        iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
14958 %{
14959   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
14960   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
14961   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
14962          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
14963   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}
14964 
14965   ins_encode %{
14966     __ string_indexof($str1$$Register, $str2$$Register,
14967                       $cnt1$$Register, $cnt2$$Register,
14968                       $tmp1$$Register, $tmp2$$Register,
14969                       $tmp3$$Register, $tmp4$$Register,
14970                       -1, $result$$Register);
14971   %}
14972   ins_pipe(pipe_class_memory);
14973 %}
14974 
14975 instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
14976                  immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
14977                  iRegI tmp3, iRegI tmp4, rFlagsReg cr)
14978 %{
14979   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
14980   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
14981   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
14982          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
14983   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}
14984 
14985   ins_encode %{
14986     int icnt2 = (int)$int_cnt2$$constant;
14987     __ string_indexof($str1$$Register, $str2$$Register,
14988                       $cnt1$$Register, zr,
14989                       $tmp1$$Register, $tmp2$$Register,
14990                       $tmp3$$Register, $tmp4$$Register,
14991                       icnt2, $result$$Register);
14992   %}
14993   ins_pipe(pipe_class_memory);
14994 %}
14995 
14996 instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
14997                         iRegI_R0 result, rFlagsReg cr)
14998 %{
14999   predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
15000   match(Set result (StrEquals (Binary str1 str2) cnt));
15001   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
15002 
15003   format %{ "String Equals $str1,$str2,$cnt -> $result" %}
15004   ins_encode %{
15005     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
15006     __ arrays_equals($str1$$Register, $str2$$Register,
15007                      $result$$Register, $cnt$$Register,
15008                      1, /*is_string*/true);
15009   %}
15010   ins_pipe(pipe_class_memory);
15011 %}
15012 
15013 instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
15014                         iRegI_R0 result, rFlagsReg cr)
15015 %{
15016   predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
15017   match(Set result (StrEquals (Binary str1 str2) cnt));
15018   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
15019 
15020   format %{ "String Equals $str1,$str2,$cnt -> $result" %}
15021   ins_encode %{
15022     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
15023     __ asrw($cnt$$Register, $cnt$$Register, 1);
15024     __ arrays_equals($str1$$Register, $str2$$Register,
15025                      $result$$Register, $cnt$$Register,
15026                      2, /*is_string*/true);
15027   %}
15028   ins_pipe(pipe_class_memory);
15029 %}
15030 
15031 instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
15032                       iRegP_R10 tmp, rFlagsReg cr)
15033 %{
15034   predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
15035   match(Set result (AryEq ary1 ary2));
15036   effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);
15037 
15038   format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
15039   ins_encode %{
15040     __ arrays_equals($ary1$$Register, $ary2$$Register,
15041                      $result$$Register, $tmp$$Register,
15042                      1, /*is_string*/false);
15043     %}
15044   ins_pipe(pipe_class_memory);
15045 %}
15046 
15047 instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
15048                       iRegP_R10 tmp, rFlagsReg cr)
15049 %{
15050   predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
15051   match(Set result (AryEq ary1 ary2));
15052   effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);
15053 
15054   format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
15055   ins_encode %{
15056     __ arrays_equals($ary1$$Register, $ary2$$Register,
15057                      $result$$Register, $tmp$$Register,
15058                      2, /*is_string*/false);
15059   %}
15060   ins_pipe(pipe_class_memory);
15061 %}
15062 
15063 
15064 // fast char[] to byte[] compression
15065 instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
15066                          vRegD_V0 tmp1, vRegD_V1 tmp2,
15067                          vRegD_V2 tmp3, vRegD_V3 tmp4,
15068                          iRegI_R0 result, rFlagsReg cr)
15069 %{
15070   match(Set result (StrCompressedCopy src (Binary dst len)));
15071   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
15072 
15073   format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
15074   ins_encode %{
15075     __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
15076                            $tmp1$$FloatRegister, $tmp2$$FloatRegister,
15077                            $tmp3$$FloatRegister, $tmp4$$FloatRegister,
15078                            $result$$Register);
15079   %}
15080   ins_pipe( pipe_slow );
15081 %}
15082 
15083 // fast byte[] to char[] inflation
15084 instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
15085                         vRegD tmp1, vRegD tmp2, vRegD tmp3, iRegP_R3 tmp4, rFlagsReg cr)
15086 %{
15087   match(Set dummy (StrInflatedCopy src (Binary dst len)));
15088   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
15089 
15090   format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2" %}
15091   ins_encode %{
15092     __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
15093                           $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
15094   %}
15095   ins_pipe(pipe_class_memory);
15096 %}
15097 
15098 // encode char[] to byte[] in ISO_8859_1
15099 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
15100                           vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
15101                           vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
15102                           iRegI_R0 result, rFlagsReg cr)
15103 %{
15104   match(Set result (EncodeISOArray src (Binary dst len)));
15105   effect(USE_KILL src, USE_KILL dst, USE_KILL len,
15106          KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);
15107 
15108   format %{ "Encode array $src,$dst,$len -> $result" %}
15109   ins_encode %{
15110     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
15111          $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
15112          $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
15113   %}
15114   ins_pipe( pipe_class_memory );
15115 %}
15116 
15117 // ============================================================================
15118 // This name is KNOWN by the ADLC and cannot be changed.
15119 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15120 // for this guy.
15121 instruct tlsLoadP(thread_RegP dst)
15122 %{
15123   match(Set dst (ThreadLocal));
15124 
15125   ins_cost(0);
15126 
15127   format %{ " -- \t// $dst=Thread::current(), empty" %}
15128 
15129   size(0);
15130 
15131   ins_encode( /*empty*/ );
15132 
15133   ins_pipe(pipe_class_empty);
15134 %}
15135 
15136 // ====================VECTOR INSTRUCTIONS=====================================
15137 
15138 // Load vector (32 bits)
15139 instruct loadV4(vecD dst, vmem4 mem)
15140 %{
15141   predicate(n->as_LoadVector()->memory_size() == 4);
15142   match(Set dst (LoadVector mem));
15143   ins_cost(4 * INSN_COST);
15144   format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
15145   ins_encode( aarch64_enc_ldrvS(dst, mem) );
15146   ins_pipe(vload_reg_mem64);
15147 %}
15148 
15149 // Load vector (64 bits)
15150 instruct loadV8(vecD dst, vmem8 mem)
15151 %{
15152   predicate(n->as_LoadVector()->memory_size() == 8);
15153   match(Set dst (LoadVector mem));
15154   ins_cost(4 * INSN_COST);
15155   format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
15156   ins_encode( aarch64_enc_ldrvD(dst, mem) );
15157   ins_pipe(vload_reg_mem64);
15158 %}
15159 
15160 // Load Vector (128 bits)
15161 instruct loadV16(vecX dst, vmem16 mem)
15162 %{
15163   predicate(n->as_LoadVector()->memory_size() == 16);
15164   match(Set dst (LoadVector mem));
15165   ins_cost(4 * INSN_COST);
15166   format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
15167   ins_encode( aarch64_enc_ldrvQ(dst, mem) );
15168   ins_pipe(vload_reg_mem128);
15169 %}
15170 
15171 // Store Vector (32 bits)
15172 instruct storeV4(vecD src, vmem4 mem)
15173 %{
15174   predicate(n->as_StoreVector()->memory_size() == 4);
15175   match(Set mem (StoreVector mem src));
15176   ins_cost(4 * INSN_COST);
15177   format %{ "strs   $mem,$src\t# vector (32 bits)" %}
15178   ins_encode( aarch64_enc_strvS(src, mem) );
15179   ins_pipe(vstore_reg_mem64);
15180 %}
15181 
15182 // Store Vector (64 bits)
15183 instruct storeV8(vecD src, vmem8 mem)
15184 %{
15185   predicate(n->as_StoreVector()->memory_size() == 8);
15186   match(Set mem (StoreVector mem src));
15187   ins_cost(4 * INSN_COST);
15188   format %{ "strd   $mem,$src\t# vector (64 bits)" %}
15189   ins_encode( aarch64_enc_strvD(src, mem) );
15190   ins_pipe(vstore_reg_mem64);
15191 %}
15192 
15193 // Store Vector (128 bits)
15194 instruct storeV16(vecX src, vmem16 mem)
15195 %{
15196   predicate(n->as_StoreVector()->memory_size() == 16);
15197   match(Set mem (StoreVector mem src));
15198   ins_cost(4 * INSN_COST);
15199   format %{ "strq   $mem,$src\t# vector (128 bits)" %}
15200   ins_encode( aarch64_enc_strvQ(src, mem) );
15201   ins_pipe(vstore_reg_mem128);
15202 %}
15203 
15204 instruct replicate8B(vecD dst, iRegIorL2I src)
15205 %{
15206   predicate(n->as_Vector()->length() == 4 ||
15207             n->as_Vector()->length() == 8);
15208   match(Set dst (ReplicateB src));
15209   ins_cost(INSN_COST);
15210   format %{ "dup  $dst, $src\t# vector (8B)" %}
15211   ins_encode %{
15212     __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
15213   %}
15214   ins_pipe(vdup_reg_reg64);
15215 %}
15216 
15217 instruct replicate16B(vecX dst, iRegIorL2I src)
15218 %{
15219   predicate(n->as_Vector()->length() == 16);
15220   match(Set dst (ReplicateB src));
15221   ins_cost(INSN_COST);
15222   format %{ "dup  $dst, $src\t# vector (16B)" %}
15223   ins_encode %{
15224     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
15225   %}
15226   ins_pipe(vdup_reg_reg128);
15227 %}
15228 
15229 instruct replicate8B_imm(vecD dst, immI con)
15230 %{
15231   predicate(n->as_Vector()->length() == 4 ||
15232             n->as_Vector()->length() == 8);
15233   match(Set dst (ReplicateB con));
15234   ins_cost(INSN_COST);
15235   format %{ "movi  $dst, $con\t# vector(8B)" %}
15236   ins_encode %{
15237     __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
15238   %}
15239   ins_pipe(vmovi_reg_imm64);
15240 %}
15241 
15242 instruct replicate16B_imm(vecX dst, immI con)
15243 %{
15244   predicate(n->as_Vector()->length() == 16);
15245   match(Set dst (ReplicateB con));
15246   ins_cost(INSN_COST);
15247   format %{ "movi  $dst, $con\t# vector(16B)" %}
15248   ins_encode %{
15249     __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
15250   %}
15251   ins_pipe(vmovi_reg_imm128);
15252 %}
15253 
15254 instruct replicate4S(vecD dst, iRegIorL2I src)
15255 %{
15256   predicate(n->as_Vector()->length() == 2 ||
15257             n->as_Vector()->length() == 4);
15258   match(Set dst (ReplicateS src));
15259   ins_cost(INSN_COST);
15260   format %{ "dup  $dst, $src\t# vector (4S)" %}
15261   ins_encode %{
15262     __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
15263   %}
15264   ins_pipe(vdup_reg_reg64);
15265 %}
15266 
15267 instruct replicate8S(vecX dst, iRegIorL2I src)
15268 %{
15269   predicate(n->as_Vector()->length() == 8);
15270   match(Set dst (ReplicateS src));
15271   ins_cost(INSN_COST);
15272   format %{ "dup  $dst, $src\t# vector (8S)" %}
15273   ins_encode %{
15274     __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
15275   %}
15276   ins_pipe(vdup_reg_reg128);
15277 %}
15278 
15279 instruct replicate4S_imm(vecD dst, immI con)
15280 %{
15281   predicate(n->as_Vector()->length() == 2 ||
15282             n->as_Vector()->length() == 4);
15283   match(Set dst (ReplicateS con));
15284   ins_cost(INSN_COST);
15285   format %{ "movi  $dst, $con\t# vector(4H)" %}
15286   ins_encode %{
15287     __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
15288   %}
15289   ins_pipe(vmovi_reg_imm64);
15290 %}
15291 
15292 instruct replicate8S_imm(vecX dst, immI con)
15293 %{
15294   predicate(n->as_Vector()->length() == 8);
15295   match(Set dst (ReplicateS con));
15296   ins_cost(INSN_COST);
15297   format %{ "movi  $dst, $con\t# vector(8H)" %}
15298   ins_encode %{
15299     __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
15300   %}
15301   ins_pipe(vmovi_reg_imm128);
15302 %}
15303 
15304 instruct replicate2I(vecD dst, iRegIorL2I src)
15305 %{
15306   predicate(n->as_Vector()->length() == 2);
15307   match(Set dst (ReplicateI src));
15308   ins_cost(INSN_COST);
15309   format %{ "dup  $dst, $src\t# vector (2I)" %}
15310   ins_encode %{
15311     __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
15312   %}
15313   ins_pipe(vdup_reg_reg64);
15314 %}
15315 
15316 instruct replicate4I(vecX dst, iRegIorL2I src)
15317 %{
15318   predicate(n->as_Vector()->length() == 4);
15319   match(Set dst (ReplicateI src));
15320   ins_cost(INSN_COST);
15321   format %{ "dup  $dst, $src\t# vector (4I)" %}
15322   ins_encode %{
15323     __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
15324   %}
15325   ins_pipe(vdup_reg_reg128);
15326 %}
15327 
15328 instruct replicate2I_imm(vecD dst, immI con)
15329 %{
15330   predicate(n->as_Vector()->length() == 2);
15331   match(Set dst (ReplicateI con));
15332   ins_cost(INSN_COST);
15333   format %{ "movi  $dst, $con\t# vector(2I)" %}
15334   ins_encode %{
15335     __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
15336   %}
15337   ins_pipe(vmovi_reg_imm64);
15338 %}
15339 
15340 instruct replicate4I_imm(vecX dst, immI con)
15341 %{
15342   predicate(n->as_Vector()->length() == 4);
15343   match(Set dst (ReplicateI con));
15344   ins_cost(INSN_COST);
15345   format %{ "movi  $dst, $con\t# vector(4I)" %}
15346   ins_encode %{
15347     __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
15348   %}
15349   ins_pipe(vmovi_reg_imm128);
15350 %}
15351 
15352 instruct replicate2L(vecX dst, iRegL src)
15353 %{
15354   predicate(n->as_Vector()->length() == 2);
15355   match(Set dst (ReplicateL src));
15356   ins_cost(INSN_COST);
15357   format %{ "dup  $dst, $src\t# vector (2L)" %}
15358   ins_encode %{
15359     __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
15360   %}
15361   ins_pipe(vdup_reg_reg128);
15362 %}
15363 
15364 instruct replicate2L_zero(vecX dst, immI0 zero)
15365 %{
15366   predicate(n->as_Vector()->length() == 2);
15367   match(Set dst (ReplicateI zero));
15368   ins_cost(INSN_COST);
  format %{ "eor  $dst, $dst, $dst\t# vector (2L zero)" %}
15370   ins_encode %{
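    // EOR of the register with itself yields zero in every lane,
    // avoiding the need to materialise the immediate.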
15371     __ eor(as_FloatRegister($dst$$reg), __ T16B,
15372            as_FloatRegister($dst$$reg),
15373            as_FloatRegister($dst$$reg));
15374   %}
15375   ins_pipe(vmovi_reg_imm128);
15376 %}
15377 
15378 instruct replicate2F(vecD dst, vRegF src)
15379 %{
15380   predicate(n->as_Vector()->length() == 2);
15381   match(Set dst (ReplicateF src));
15382   ins_cost(INSN_COST);
15383   format %{ "dup  $dst, $src\t# vector (2F)" %}
15384   ins_encode %{
15385     __ dup(as_FloatRegister($dst$$reg), __ T2S,
15386            as_FloatRegister($src$$reg));
15387   %}
15388   ins_pipe(vdup_reg_freg64);
15389 %}
15390 
15391 instruct replicate4F(vecX dst, vRegF src)
15392 %{
15393   predicate(n->as_Vector()->length() == 4);
15394   match(Set dst (ReplicateF src));
15395   ins_cost(INSN_COST);
15396   format %{ "dup  $dst, $src\t# vector (4F)" %}
15397   ins_encode %{
15398     __ dup(as_FloatRegister($dst$$reg), __ T4S,
15399            as_FloatRegister($src$$reg));
15400   %}
15401   ins_pipe(vdup_reg_freg128);
15402 %}
15403 
15404 instruct replicate2D(vecX dst, vRegD src)
15405 %{
15406   predicate(n->as_Vector()->length() == 2);
15407   match(Set dst (ReplicateD src));
15408   ins_cost(INSN_COST);
15409   format %{ "dup  $dst, $src\t# vector (2D)" %}
15410   ins_encode %{
15411     __ dup(as_FloatRegister($dst$$reg), __ T2D,
15412            as_FloatRegister($src$$reg));
15413   %}
15414   ins_pipe(vdup_reg_dreg128);
15415 %}
15416 
15417 // ====================REDUCTION ARITHMETIC====================================
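// Reductions fold every lane of the vector operand into the incoming
// scalar, e.g. AddReductionVI computes src1 + src2[0] + .. + src2[n-1].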
15418 
15419 instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
15420 %{
15421   match(Set dst (AddReductionVI src1 src2));
15422   ins_cost(INSN_COST);
15423   effect(TEMP tmp, TEMP tmp2);
15424   format %{ "umov  $tmp, $src2, S, 0\n\t"
15425             "umov  $tmp2, $src2, S, 1\n\t"
15426             "addw  $dst, $src1, $tmp\n\t"
15427             "addw  $dst, $dst, $tmp2\t add reduction2i"
15428   %}
15429   ins_encode %{
15430     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
15431     __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
15432     __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
15433     __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
15434   %}
15435   ins_pipe(pipe_class_default);
15436 %}
15437 
15438 instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
15439 %{
15440   match(Set dst (AddReductionVI src1 src2));
15441   ins_cost(INSN_COST);
15442   effect(TEMP tmp, TEMP tmp2);
15443   format %{ "addv  $tmp, T4S, $src2\n\t"
15444             "umov  $tmp2, $tmp, S, 0\n\t"
15445             "addw  $dst, $tmp2, $src1\t add reduction4i"
15446   %}
15447   ins_encode %{
15448     __ addv(as_FloatRegister($tmp$$reg), __ T4S,
15449             as_FloatRegister($src2$$reg));
15450     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
15451     __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
15452   %}
15453   ins_pipe(pipe_class_default);
15454 %}
15455 
15456 instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
15457 %{
15458   match(Set dst (MulReductionVI src1 src2));
15459   ins_cost(INSN_COST);
15460   effect(TEMP tmp, TEMP dst);
15461   format %{ "umov  $tmp, $src2, S, 0\n\t"
15462             "mul   $dst, $tmp, $src1\n\t"
15463             "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
15465   %}
15466   ins_encode %{
15467     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
15468     __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
15469     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
15470     __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
15471   %}
15472   ins_pipe(pipe_class_default);
15473 %}
15474 
15475 instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
15476 %{
15477   match(Set dst (MulReductionVI src1 src2));
15478   ins_cost(INSN_COST);
15479   effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, D, $src2, 0, 1\n\t"
            "mulv  $tmp, T2S, $tmp, $src2\n\t"
15482             "umov  $tmp2, $tmp, S, 0\n\t"
15483             "mul   $dst, $tmp2, $src1\n\t"
15484             "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
15486   %}
15487   ins_encode %{
15488     __ ins(as_FloatRegister($tmp$$reg), __ D,
15489            as_FloatRegister($src2$$reg), 0, 1);
15490     __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
15491            as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
15492     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
15493     __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
15494     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
15495     __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
15496   %}
15497   ins_pipe(pipe_class_default);
15498 %}
15499 
15500 instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
15501 %{
15502   match(Set dst (AddReductionVF src1 src2));
15503   ins_cost(INSN_COST);
15504   effect(TEMP tmp, TEMP dst);
15505   format %{ "fadds $dst, $src1, $src2\n\t"
15506             "ins   $tmp, S, $src2, 0, 1\n\t"
15507             "fadds $dst, $dst, $tmp\t add reduction2f"
15508   %}
15509   ins_encode %{
15510     __ fadds(as_FloatRegister($dst$$reg),
15511              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15512     __ ins(as_FloatRegister($tmp$$reg), __ S,
15513            as_FloatRegister($src2$$reg), 0, 1);
15514     __ fadds(as_FloatRegister($dst$$reg),
15515              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15516   %}
15517   ins_pipe(pipe_class_default);
15518 %}
15519 
15520 instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
15521 %{
15522   match(Set dst (AddReductionVF src1 src2));
15523   ins_cost(INSN_COST);
15524   effect(TEMP tmp, TEMP dst);
15525   format %{ "fadds $dst, $src1, $src2\n\t"
15526             "ins   $tmp, S, $src2, 0, 1\n\t"
15527             "fadds $dst, $dst, $tmp\n\t"
15528             "ins   $tmp, S, $src2, 0, 2\n\t"
15529             "fadds $dst, $dst, $tmp\n\t"
15530             "ins   $tmp, S, $src2, 0, 3\n\t"
15531             "fadds $dst, $dst, $tmp\t add reduction4f"
15532   %}
15533   ins_encode %{
15534     __ fadds(as_FloatRegister($dst$$reg),
15535              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15536     __ ins(as_FloatRegister($tmp$$reg), __ S,
15537            as_FloatRegister($src2$$reg), 0, 1);
15538     __ fadds(as_FloatRegister($dst$$reg),
15539              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15540     __ ins(as_FloatRegister($tmp$$reg), __ S,
15541            as_FloatRegister($src2$$reg), 0, 2);
15542     __ fadds(as_FloatRegister($dst$$reg),
15543              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15544     __ ins(as_FloatRegister($tmp$$reg), __ S,
15545            as_FloatRegister($src2$$reg), 0, 3);
15546     __ fadds(as_FloatRegister($dst$$reg),
15547              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15548   %}
15549   ins_pipe(pipe_class_default);
15550 %}
15551 
15552 instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
15553 %{
15554   match(Set dst (MulReductionVF src1 src2));
15555   ins_cost(INSN_COST);
15556   effect(TEMP tmp, TEMP dst);
15557   format %{ "fmuls $dst, $src1, $src2\n\t"
15558             "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
15560   %}
15561   ins_encode %{
15562     __ fmuls(as_FloatRegister($dst$$reg),
15563              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15564     __ ins(as_FloatRegister($tmp$$reg), __ S,
15565            as_FloatRegister($src2$$reg), 0, 1);
15566     __ fmuls(as_FloatRegister($dst$$reg),
15567              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15568   %}
15569   ins_pipe(pipe_class_default);
15570 %}
15571 
15572 instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
15573 %{
15574   match(Set dst (MulReductionVF src1 src2));
15575   ins_cost(INSN_COST);
15576   effect(TEMP tmp, TEMP dst);
15577   format %{ "fmuls $dst, $src1, $src2\n\t"
15578             "ins   $tmp, S, $src2, 0, 1\n\t"
15579             "fmuls $dst, $dst, $tmp\n\t"
15580             "ins   $tmp, S, $src2, 0, 2\n\t"
15581             "fmuls $dst, $dst, $tmp\n\t"
15582             "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
15584   %}
15585   ins_encode %{
15586     __ fmuls(as_FloatRegister($dst$$reg),
15587              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15588     __ ins(as_FloatRegister($tmp$$reg), __ S,
15589            as_FloatRegister($src2$$reg), 0, 1);
15590     __ fmuls(as_FloatRegister($dst$$reg),
15591              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15592     __ ins(as_FloatRegister($tmp$$reg), __ S,
15593            as_FloatRegister($src2$$reg), 0, 2);
15594     __ fmuls(as_FloatRegister($dst$$reg),
15595              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15596     __ ins(as_FloatRegister($tmp$$reg), __ S,
15597            as_FloatRegister($src2$$reg), 0, 3);
15598     __ fmuls(as_FloatRegister($dst$$reg),
15599              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15600   %}
15601   ins_pipe(pipe_class_default);
15602 %}
15603 
15604 instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
15605 %{
15606   match(Set dst (AddReductionVD src1 src2));
15607   ins_cost(INSN_COST);
15608   effect(TEMP tmp, TEMP dst);
15609   format %{ "faddd $dst, $src1, $src2\n\t"
15610             "ins   $tmp, D, $src2, 0, 1\n\t"
15611             "faddd $dst, $dst, $tmp\t add reduction2d"
15612   %}
15613   ins_encode %{
15614     __ faddd(as_FloatRegister($dst$$reg),
15615              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15616     __ ins(as_FloatRegister($tmp$$reg), __ D,
15617            as_FloatRegister($src2$$reg), 0, 1);
15618     __ faddd(as_FloatRegister($dst$$reg),
15619              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15620   %}
15621   ins_pipe(pipe_class_default);
15622 %}
15623 
15624 instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
15625 %{
15626   match(Set dst (MulReductionVD src1 src2));
15627   ins_cost(INSN_COST);
15628   effect(TEMP tmp, TEMP dst);
15629   format %{ "fmuld $dst, $src1, $src2\n\t"
15630             "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
15632   %}
15633   ins_encode %{
15634     __ fmuld(as_FloatRegister($dst$$reg),
15635              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15636     __ ins(as_FloatRegister($tmp$$reg), __ D,
15637            as_FloatRegister($src2$$reg), 0, 1);
15638     __ fmuld(as_FloatRegister($dst$$reg),
15639              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15640   %}
15641   ins_pipe(pipe_class_default);
15642 %}
15643 
15644 // ====================VECTOR ARITHMETIC=======================================
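// All rules below operate on the FP/SIMD register file: vecD operands live
// in 64-bit (D) registers and vecX operands in 128-bit (Q) registers.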
15645 
15646 // --------------------------------- ADD --------------------------------------
15647 
15648 instruct vadd8B(vecD dst, vecD src1, vecD src2)
15649 %{
15650   predicate(n->as_Vector()->length() == 4 ||
15651             n->as_Vector()->length() == 8);
15652   match(Set dst (AddVB src1 src2));
15653   ins_cost(INSN_COST);
15654   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
15655   ins_encode %{
15656     __ addv(as_FloatRegister($dst$$reg), __ T8B,
15657             as_FloatRegister($src1$$reg),
15658             as_FloatRegister($src2$$reg));
15659   %}
15660   ins_pipe(vdop64);
15661 %}
15662 
15663 instruct vadd16B(vecX dst, vecX src1, vecX src2)
15664 %{
15665   predicate(n->as_Vector()->length() == 16);
15666   match(Set dst (AddVB src1 src2));
15667   ins_cost(INSN_COST);
15668   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
15669   ins_encode %{
15670     __ addv(as_FloatRegister($dst$$reg), __ T16B,
15671             as_FloatRegister($src1$$reg),
15672             as_FloatRegister($src2$$reg));
15673   %}
15674   ins_pipe(vdop128);
15675 %}
15676 
15677 instruct vadd4S(vecD dst, vecD src1, vecD src2)
15678 %{
15679   predicate(n->as_Vector()->length() == 2 ||
15680             n->as_Vector()->length() == 4);
15681   match(Set dst (AddVS src1 src2));
15682   ins_cost(INSN_COST);
15683   format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
15684   ins_encode %{
15685     __ addv(as_FloatRegister($dst$$reg), __ T4H,
15686             as_FloatRegister($src1$$reg),
15687             as_FloatRegister($src2$$reg));
15688   %}
15689   ins_pipe(vdop64);
15690 %}
15691 
15692 instruct vadd8S(vecX dst, vecX src1, vecX src2)
15693 %{
15694   predicate(n->as_Vector()->length() == 8);
15695   match(Set dst (AddVS src1 src2));
15696   ins_cost(INSN_COST);
15697   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
15698   ins_encode %{
15699     __ addv(as_FloatRegister($dst$$reg), __ T8H,
15700             as_FloatRegister($src1$$reg),
15701             as_FloatRegister($src2$$reg));
15702   %}
15703   ins_pipe(vdop128);
15704 %}
15705 
15706 instruct vadd2I(vecD dst, vecD src1, vecD src2)
15707 %{
15708   predicate(n->as_Vector()->length() == 2);
15709   match(Set dst (AddVI src1 src2));
15710   ins_cost(INSN_COST);
15711   format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
15712   ins_encode %{
15713     __ addv(as_FloatRegister($dst$$reg), __ T2S,
15714             as_FloatRegister($src1$$reg),
15715             as_FloatRegister($src2$$reg));
15716   %}
15717   ins_pipe(vdop64);
15718 %}
15719 
15720 instruct vadd4I(vecX dst, vecX src1, vecX src2)
15721 %{
15722   predicate(n->as_Vector()->length() == 4);
15723   match(Set dst (AddVI src1 src2));
15724   ins_cost(INSN_COST);
15725   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
15726   ins_encode %{
15727     __ addv(as_FloatRegister($dst$$reg), __ T4S,
15728             as_FloatRegister($src1$$reg),
15729             as_FloatRegister($src2$$reg));
15730   %}
15731   ins_pipe(vdop128);
15732 %}
15733 
15734 instruct vadd2L(vecX dst, vecX src1, vecX src2)
15735 %{
15736   predicate(n->as_Vector()->length() == 2);
15737   match(Set dst (AddVL src1 src2));
15738   ins_cost(INSN_COST);
15739   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
15740   ins_encode %{
15741     __ addv(as_FloatRegister($dst$$reg), __ T2D,
15742             as_FloatRegister($src1$$reg),
15743             as_FloatRegister($src2$$reg));
15744   %}
15745   ins_pipe(vdop128);
15746 %}
15747 
15748 instruct vadd2F(vecD dst, vecD src1, vecD src2)
15749 %{
15750   predicate(n->as_Vector()->length() == 2);
15751   match(Set dst (AddVF src1 src2));
15752   ins_cost(INSN_COST);
15753   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
15754   ins_encode %{
15755     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
15756             as_FloatRegister($src1$$reg),
15757             as_FloatRegister($src2$$reg));
15758   %}
15759   ins_pipe(vdop_fp64);
15760 %}
15761 
15762 instruct vadd4F(vecX dst, vecX src1, vecX src2)
15763 %{
15764   predicate(n->as_Vector()->length() == 4);
15765   match(Set dst (AddVF src1 src2));
15766   ins_cost(INSN_COST);
15767   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
15768   ins_encode %{
15769     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
15770             as_FloatRegister($src1$$reg),
15771             as_FloatRegister($src2$$reg));
15772   %}
15773   ins_pipe(vdop_fp128);
15774 %}
15775 
15776 instruct vadd2D(vecX dst, vecX src1, vecX src2)
15777 %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
15779   ins_cost(INSN_COST);
15780   format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
15781   ins_encode %{
15782     __ fadd(as_FloatRegister($dst$$reg), __ T2D,
15783             as_FloatRegister($src1$$reg),
15784             as_FloatRegister($src2$$reg));
15785   %}
15786   ins_pipe(vdop_fp128);
15787 %}
15788 
15789 // --------------------------------- SUB --------------------------------------
15790 
15791 instruct vsub8B(vecD dst, vecD src1, vecD src2)
15792 %{
15793   predicate(n->as_Vector()->length() == 4 ||
15794             n->as_Vector()->length() == 8);
15795   match(Set dst (SubVB src1 src2));
15796   ins_cost(INSN_COST);
15797   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
15798   ins_encode %{
15799     __ subv(as_FloatRegister($dst$$reg), __ T8B,
15800             as_FloatRegister($src1$$reg),
15801             as_FloatRegister($src2$$reg));
15802   %}
15803   ins_pipe(vdop64);
15804 %}
15805 
15806 instruct vsub16B(vecX dst, vecX src1, vecX src2)
15807 %{
15808   predicate(n->as_Vector()->length() == 16);
15809   match(Set dst (SubVB src1 src2));
15810   ins_cost(INSN_COST);
15811   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
15812   ins_encode %{
15813     __ subv(as_FloatRegister($dst$$reg), __ T16B,
15814             as_FloatRegister($src1$$reg),
15815             as_FloatRegister($src2$$reg));
15816   %}
15817   ins_pipe(vdop128);
15818 %}
15819 
15820 instruct vsub4S(vecD dst, vecD src1, vecD src2)
15821 %{
15822   predicate(n->as_Vector()->length() == 2 ||
15823             n->as_Vector()->length() == 4);
15824   match(Set dst (SubVS src1 src2));
15825   ins_cost(INSN_COST);
15826   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
15827   ins_encode %{
15828     __ subv(as_FloatRegister($dst$$reg), __ T4H,
15829             as_FloatRegister($src1$$reg),
15830             as_FloatRegister($src2$$reg));
15831   %}
15832   ins_pipe(vdop64);
15833 %}
15834 
15835 instruct vsub8S(vecX dst, vecX src1, vecX src2)
15836 %{
15837   predicate(n->as_Vector()->length() == 8);
15838   match(Set dst (SubVS src1 src2));
15839   ins_cost(INSN_COST);
15840   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
15841   ins_encode %{
15842     __ subv(as_FloatRegister($dst$$reg), __ T8H,
15843             as_FloatRegister($src1$$reg),
15844             as_FloatRegister($src2$$reg));
15845   %}
15846   ins_pipe(vdop128);
15847 %}
15848 
15849 instruct vsub2I(vecD dst, vecD src1, vecD src2)
15850 %{
15851   predicate(n->as_Vector()->length() == 2);
15852   match(Set dst (SubVI src1 src2));
15853   ins_cost(INSN_COST);
15854   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
15855   ins_encode %{
15856     __ subv(as_FloatRegister($dst$$reg), __ T2S,
15857             as_FloatRegister($src1$$reg),
15858             as_FloatRegister($src2$$reg));
15859   %}
15860   ins_pipe(vdop64);
15861 %}
15862 
15863 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15864 %{
15865   predicate(n->as_Vector()->length() == 4);
15866   match(Set dst (SubVI src1 src2));
15867   ins_cost(INSN_COST);
15868   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
15869   ins_encode %{
15870     __ subv(as_FloatRegister($dst$$reg), __ T4S,
15871             as_FloatRegister($src1$$reg),
15872             as_FloatRegister($src2$$reg));
15873   %}
15874   ins_pipe(vdop128);
15875 %}
15876 
15877 instruct vsub2L(vecX dst, vecX src1, vecX src2)
15878 %{
15879   predicate(n->as_Vector()->length() == 2);
15880   match(Set dst (SubVL src1 src2));
15881   ins_cost(INSN_COST);
15882   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
15883   ins_encode %{
15884     __ subv(as_FloatRegister($dst$$reg), __ T2D,
15885             as_FloatRegister($src1$$reg),
15886             as_FloatRegister($src2$$reg));
15887   %}
15888   ins_pipe(vdop128);
15889 %}
15890 
15891 instruct vsub2F(vecD dst, vecD src1, vecD src2)
15892 %{
15893   predicate(n->as_Vector()->length() == 2);
15894   match(Set dst (SubVF src1 src2));
15895   ins_cost(INSN_COST);
15896   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
15897   ins_encode %{
15898     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
15899             as_FloatRegister($src1$$reg),
15900             as_FloatRegister($src2$$reg));
15901   %}
15902   ins_pipe(vdop_fp64);
15903 %}
15904 
15905 instruct vsub4F(vecX dst, vecX src1, vecX src2)
15906 %{
15907   predicate(n->as_Vector()->length() == 4);
15908   match(Set dst (SubVF src1 src2));
15909   ins_cost(INSN_COST);
15910   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
15911   ins_encode %{
15912     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
15913             as_FloatRegister($src1$$reg),
15914             as_FloatRegister($src2$$reg));
15915   %}
15916   ins_pipe(vdop_fp128);
15917 %}
15918 
15919 instruct vsub2D(vecX dst, vecX src1, vecX src2)
15920 %{
15921   predicate(n->as_Vector()->length() == 2);
15922   match(Set dst (SubVD src1 src2));
15923   ins_cost(INSN_COST);
15924   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
15925   ins_encode %{
15926     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
15927             as_FloatRegister($src1$$reg),
15928             as_FloatRegister($src2$$reg));
15929   %}
15930   ins_pipe(vdop_fp128);
15931 %}
15932 
15933 // --------------------------------- MUL --------------------------------------
15934 
15935 instruct vmul4S(vecD dst, vecD src1, vecD src2)
15936 %{
15937   predicate(n->as_Vector()->length() == 2 ||
15938             n->as_Vector()->length() == 4);
15939   match(Set dst (MulVS src1 src2));
15940   ins_cost(INSN_COST);
15941   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
15942   ins_encode %{
15943     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
15944             as_FloatRegister($src1$$reg),
15945             as_FloatRegister($src2$$reg));
15946   %}
15947   ins_pipe(vmul64);
15948 %}
15949 
15950 instruct vmul8S(vecX dst, vecX src1, vecX src2)
15951 %{
15952   predicate(n->as_Vector()->length() == 8);
15953   match(Set dst (MulVS src1 src2));
15954   ins_cost(INSN_COST);
15955   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
15956   ins_encode %{
15957     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
15958             as_FloatRegister($src1$$reg),
15959             as_FloatRegister($src2$$reg));
15960   %}
15961   ins_pipe(vmul128);
15962 %}
15963 
15964 instruct vmul2I(vecD dst, vecD src1, vecD src2)
15965 %{
15966   predicate(n->as_Vector()->length() == 2);
15967   match(Set dst (MulVI src1 src2));
15968   ins_cost(INSN_COST);
15969   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
15970   ins_encode %{
15971     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
15972             as_FloatRegister($src1$$reg),
15973             as_FloatRegister($src2$$reg));
15974   %}
15975   ins_pipe(vmul64);
15976 %}
15977 
15978 instruct vmul4I(vecX dst, vecX src1, vecX src2)
15979 %{
15980   predicate(n->as_Vector()->length() == 4);
15981   match(Set dst (MulVI src1 src2));
15982   ins_cost(INSN_COST);
15983   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
15984   ins_encode %{
15985     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
15986             as_FloatRegister($src1$$reg),
15987             as_FloatRegister($src2$$reg));
15988   %}
15989   ins_pipe(vmul128);
15990 %}
15991 
15992 instruct vmul2F(vecD dst, vecD src1, vecD src2)
15993 %{
15994   predicate(n->as_Vector()->length() == 2);
15995   match(Set dst (MulVF src1 src2));
15996   ins_cost(INSN_COST);
15997   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
15998   ins_encode %{
15999     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
16000             as_FloatRegister($src1$$reg),
16001             as_FloatRegister($src2$$reg));
16002   %}
16003   ins_pipe(vmuldiv_fp64);
16004 %}
16005 
16006 instruct vmul4F(vecX dst, vecX src1, vecX src2)
16007 %{
16008   predicate(n->as_Vector()->length() == 4);
16009   match(Set dst (MulVF src1 src2));
16010   ins_cost(INSN_COST);
16011   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
16012   ins_encode %{
16013     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
16014             as_FloatRegister($src1$$reg),
16015             as_FloatRegister($src2$$reg));
16016   %}
16017   ins_pipe(vmuldiv_fp128);
16018 %}
16019 
16020 instruct vmul2D(vecX dst, vecX src1, vecX src2)
16021 %{
16022   predicate(n->as_Vector()->length() == 2);
16023   match(Set dst (MulVD src1 src2));
16024   ins_cost(INSN_COST);
16025   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
16026   ins_encode %{
16027     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
16028             as_FloatRegister($src1$$reg),
16029             as_FloatRegister($src2$$reg));
16030   %}
16031   ins_pipe(vmuldiv_fp128);
16032 %}
16033 
16034 // --------------------------------- MLA --------------------------------------
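// Each rule below matches AddV(dst, MulV(src1, src2)) and fuses it into a
// single multiply-accumulate, i.e. dst[i] += src1[i] * src2[i].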
16035 
16036 instruct vmla4S(vecD dst, vecD src1, vecD src2)
16037 %{
16038   predicate(n->as_Vector()->length() == 2 ||
16039             n->as_Vector()->length() == 4);
16040   match(Set dst (AddVS dst (MulVS src1 src2)));
16041   ins_cost(INSN_COST);
16042   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
16043   ins_encode %{
16044     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
16045             as_FloatRegister($src1$$reg),
16046             as_FloatRegister($src2$$reg));
16047   %}
16048   ins_pipe(vmla64);
16049 %}
16050 
16051 instruct vmla8S(vecX dst, vecX src1, vecX src2)
16052 %{
16053   predicate(n->as_Vector()->length() == 8);
16054   match(Set dst (AddVS dst (MulVS src1 src2)));
16055   ins_cost(INSN_COST);
16056   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
16057   ins_encode %{
16058     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
16059             as_FloatRegister($src1$$reg),
16060             as_FloatRegister($src2$$reg));
16061   %}
16062   ins_pipe(vmla128);
16063 %}
16064 
16065 instruct vmla2I(vecD dst, vecD src1, vecD src2)
16066 %{
16067   predicate(n->as_Vector()->length() == 2);
16068   match(Set dst (AddVI dst (MulVI src1 src2)));
16069   ins_cost(INSN_COST);
16070   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
16071   ins_encode %{
16072     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
16073             as_FloatRegister($src1$$reg),
16074             as_FloatRegister($src2$$reg));
16075   %}
16076   ins_pipe(vmla64);
16077 %}
16078 
16079 instruct vmla4I(vecX dst, vecX src1, vecX src2)
16080 %{
16081   predicate(n->as_Vector()->length() == 4);
16082   match(Set dst (AddVI dst (MulVI src1 src2)));
16083   ins_cost(INSN_COST);
16084   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
16085   ins_encode %{
16086     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
16087             as_FloatRegister($src1$$reg),
16088             as_FloatRegister($src2$$reg));
16089   %}
16090   ins_pipe(vmla128);
16091 %}
16092 
16093 // --------------------------------- MLS --------------------------------------
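// Each rule below matches SubV(dst, MulV(src1, src2)) and fuses it into a
// single multiply-subtract, i.e. dst[i] -= src1[i] * src2[i].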
16094 
16095 instruct vmls4S(vecD dst, vecD src1, vecD src2)
16096 %{
16097   predicate(n->as_Vector()->length() == 2 ||
16098             n->as_Vector()->length() == 4);
16099   match(Set dst (SubVS dst (MulVS src1 src2)));
16100   ins_cost(INSN_COST);
16101   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
16102   ins_encode %{
16103     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
16104             as_FloatRegister($src1$$reg),
16105             as_FloatRegister($src2$$reg));
16106   %}
16107   ins_pipe(vmla64);
16108 %}
16109 
16110 instruct vmls8S(vecX dst, vecX src1, vecX src2)
16111 %{
16112   predicate(n->as_Vector()->length() == 8);
16113   match(Set dst (SubVS dst (MulVS src1 src2)));
16114   ins_cost(INSN_COST);
16115   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
16116   ins_encode %{
16117     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
16118             as_FloatRegister($src1$$reg),
16119             as_FloatRegister($src2$$reg));
16120   %}
16121   ins_pipe(vmla128);
16122 %}
16123 
16124 instruct vmls2I(vecD dst, vecD src1, vecD src2)
16125 %{
16126   predicate(n->as_Vector()->length() == 2);
16127   match(Set dst (SubVI dst (MulVI src1 src2)));
16128   ins_cost(INSN_COST);
16129   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
16130   ins_encode %{
16131     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
16132             as_FloatRegister($src1$$reg),
16133             as_FloatRegister($src2$$reg));
16134   %}
16135   ins_pipe(vmla64);
16136 %}
16137 
16138 instruct vmls4I(vecX dst, vecX src1, vecX src2)
16139 %{
16140   predicate(n->as_Vector()->length() == 4);
16141   match(Set dst (SubVI dst (MulVI src1 src2)));
16142   ins_cost(INSN_COST);
16143   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
16144   ins_encode %{
16145     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
16146             as_FloatRegister($src1$$reg),
16147             as_FloatRegister($src2$$reg));
16148   %}
16149   ins_pipe(vmla128);
16150 %}
16151 
16152 // --------------------------------- DIV --------------------------------------
16153 
16154 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
16155 %{
16156   predicate(n->as_Vector()->length() == 2);
16157   match(Set dst (DivVF src1 src2));
16158   ins_cost(INSN_COST);
16159   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
16160   ins_encode %{
16161     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
16162             as_FloatRegister($src1$$reg),
16163             as_FloatRegister($src2$$reg));
16164   %}
16165   ins_pipe(vmuldiv_fp64);
16166 %}
16167 
16168 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
16169 %{
16170   predicate(n->as_Vector()->length() == 4);
16171   match(Set dst (DivVF src1 src2));
16172   ins_cost(INSN_COST);
16173   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
16174   ins_encode %{
16175     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
16176             as_FloatRegister($src1$$reg),
16177             as_FloatRegister($src2$$reg));
16178   %}
16179   ins_pipe(vmuldiv_fp128);
16180 %}
16181 
16182 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
16183 %{
16184   predicate(n->as_Vector()->length() == 2);
16185   match(Set dst (DivVD src1 src2));
16186   ins_cost(INSN_COST);
16187   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
16188   ins_encode %{
16189     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
16190             as_FloatRegister($src1$$reg),
16191             as_FloatRegister($src2$$reg));
16192   %}
16193   ins_pipe(vmuldiv_fp128);
16194 %}
16195 
16196 // --------------------------------- SQRT -------------------------------------
16197 
16198 instruct vsqrt2D(vecX dst, vecX src)
16199 %{
16200   predicate(n->as_Vector()->length() == 2);
16201   match(Set dst (SqrtVD src));
16202   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
16203   ins_encode %{
16204     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
16205              as_FloatRegister($src$$reg));
16206   %}
16207   ins_pipe(vsqrt_fp128);
16208 %}
16209 
16210 // --------------------------------- ABS --------------------------------------
16211 
16212 instruct vabs2F(vecD dst, vecD src)
16213 %{
16214   predicate(n->as_Vector()->length() == 2);
16215   match(Set dst (AbsVF src));
16216   ins_cost(INSN_COST * 3);
16217   format %{ "fabs  $dst,$src\t# vector (2S)" %}
16218   ins_encode %{
16219     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
16220             as_FloatRegister($src$$reg));
16221   %}
16222   ins_pipe(vunop_fp64);
16223 %}
16224 
16225 instruct vabs4F(vecX dst, vecX src)
16226 %{
16227   predicate(n->as_Vector()->length() == 4);
16228   match(Set dst (AbsVF src));
16229   ins_cost(INSN_COST * 3);
16230   format %{ "fabs  $dst,$src\t# vector (4S)" %}
16231   ins_encode %{
16232     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
16233             as_FloatRegister($src$$reg));
16234   %}
16235   ins_pipe(vunop_fp128);
16236 %}
16237 
16238 instruct vabs2D(vecX dst, vecX src)
16239 %{
16240   predicate(n->as_Vector()->length() == 2);
16241   match(Set dst (AbsVD src));
16242   ins_cost(INSN_COST * 3);
16243   format %{ "fabs  $dst,$src\t# vector (2D)" %}
16244   ins_encode %{
16245     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
16246             as_FloatRegister($src$$reg));
16247   %}
16248   ins_pipe(vunop_fp128);
16249 %}
16250 
16251 // --------------------------------- NEG --------------------------------------
16252 
16253 instruct vneg2F(vecD dst, vecD src)
16254 %{
16255   predicate(n->as_Vector()->length() == 2);
16256   match(Set dst (NegVF src));
16257   ins_cost(INSN_COST * 3);
16258   format %{ "fneg  $dst,$src\t# vector (2S)" %}
16259   ins_encode %{
16260     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
16261             as_FloatRegister($src$$reg));
16262   %}
16263   ins_pipe(vunop_fp64);
16264 %}
16265 
16266 instruct vneg4F(vecX dst, vecX src)
16267 %{
16268   predicate(n->as_Vector()->length() == 4);
16269   match(Set dst (NegVF src));
16270   ins_cost(INSN_COST * 3);
16271   format %{ "fneg  $dst,$src\t# vector (4S)" %}
16272   ins_encode %{
16273     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
16274             as_FloatRegister($src$$reg));
16275   %}
16276   ins_pipe(vunop_fp128);
16277 %}
16278 
16279 instruct vneg2D(vecX dst, vecX src)
16280 %{
16281   predicate(n->as_Vector()->length() == 2);
16282   match(Set dst (NegVD src));
16283   ins_cost(INSN_COST * 3);
16284   format %{ "fneg  $dst,$src\t# vector (2D)" %}
16285   ins_encode %{
16286     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
16287             as_FloatRegister($src$$reg));
16288   %}
16289   ins_pipe(vunop_fp128);
16290 %}
16291 
16292 // --------------------------------- AND --------------------------------------
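// Note: the assembler's vector AND entry point is named andr rather than and,
// since 'and' itself is a reserved C++ alternative token.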
16293 
16294 instruct vand8B(vecD dst, vecD src1, vecD src2)
16295 %{
16296   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16297             n->as_Vector()->length_in_bytes() == 8);
16298   match(Set dst (AndV src1 src2));
16299   ins_cost(INSN_COST);
16300   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16301   ins_encode %{
16302     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16303             as_FloatRegister($src1$$reg),
16304             as_FloatRegister($src2$$reg));
16305   %}
16306   ins_pipe(vlogical64);
16307 %}
16308 
16309 instruct vand16B(vecX dst, vecX src1, vecX src2)
16310 %{
16311   predicate(n->as_Vector()->length_in_bytes() == 16);
16312   match(Set dst (AndV src1 src2));
16313   ins_cost(INSN_COST);
16314   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
16315   ins_encode %{
16316     __ andr(as_FloatRegister($dst$$reg), __ T16B,
16317             as_FloatRegister($src1$$reg),
16318             as_FloatRegister($src2$$reg));
16319   %}
16320   ins_pipe(vlogical128);
16321 %}
16322 
16323 // --------------------------------- OR ---------------------------------------
16324 
16325 instruct vor8B(vecD dst, vecD src1, vecD src2)
16326 %{
16327   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16328             n->as_Vector()->length_in_bytes() == 8);
16329   match(Set dst (OrV src1 src2));
16330   ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
16332   ins_encode %{
16333     __ orr(as_FloatRegister($dst$$reg), __ T8B,
16334             as_FloatRegister($src1$$reg),
16335             as_FloatRegister($src2$$reg));
16336   %}
16337   ins_pipe(vlogical64);
16338 %}
16339 
16340 instruct vor16B(vecX dst, vecX src1, vecX src2)
16341 %{
16342   predicate(n->as_Vector()->length_in_bytes() == 16);
16343   match(Set dst (OrV src1 src2));
16344   ins_cost(INSN_COST);
16345   format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
16346   ins_encode %{
16347     __ orr(as_FloatRegister($dst$$reg), __ T16B,
16348             as_FloatRegister($src1$$reg),
16349             as_FloatRegister($src2$$reg));
16350   %}
16351   ins_pipe(vlogical128);
16352 %}
16353 
16354 // --------------------------------- XOR --------------------------------------
16355 
16356 instruct vxor8B(vecD dst, vecD src1, vecD src2)
16357 %{
16358   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16359             n->as_Vector()->length_in_bytes() == 8);
16360   match(Set dst (XorV src1 src2));
16361   ins_cost(INSN_COST);
  format %{ "eor  $dst,$src1,$src2\t# vector (8B)" %}
16363   ins_encode %{
16364     __ eor(as_FloatRegister($dst$$reg), __ T8B,
16365             as_FloatRegister($src1$$reg),
16366             as_FloatRegister($src2$$reg));
16367   %}
16368   ins_pipe(vlogical64);
16369 %}
16370 
16371 instruct vxor16B(vecX dst, vecX src1, vecX src2)
16372 %{
16373   predicate(n->as_Vector()->length_in_bytes() == 16);
16374   match(Set dst (XorV src1 src2));
16375   ins_cost(INSN_COST);
  format %{ "eor  $dst,$src1,$src2\t# vector (16B)" %}
16377   ins_encode %{
16378     __ eor(as_FloatRegister($dst$$reg), __ T16B,
16379             as_FloatRegister($src1$$reg),
16380             as_FloatRegister($src2$$reg));
16381   %}
16382   ins_pipe(vlogical128);
16383 %}
16384 
16385 // ------------------------------ Shift ---------------------------------------
16386 
16387 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
16388   match(Set dst (LShiftCntV cnt));
16389   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
16390   ins_encode %{
16391     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16392   %}
16393   ins_pipe(vdup_reg_reg128);
16394 %}
16395 
// Right shifts on AArch64 SIMD are implemented as left shifts by a negative amount
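// For example, a variable signed right shift of a 4S vector comes out as the
// sequence below (register assignments are illustrative only):
//
//   dup   v1.16b, w0           // vshiftcntR: broadcast the shift count
//   neg   v1.16b, v1.16b       //             then negate it
//   sshl  v2.4s, v2.4s, v1.4s  // vsll4I (RShiftVI): shift left by -count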
16397 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
16398   match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t# vector (16B)" %}
16400   ins_encode %{
16401     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16402     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
16403   %}
16404   ins_pipe(vdup_reg_reg128);
16405 %}
16406 
16407 instruct vsll8B(vecD dst, vecD src, vecX shift) %{
16408   predicate(n->as_Vector()->length() == 4 ||
16409             n->as_Vector()->length() == 8);
16410   match(Set dst (LShiftVB src shift));
16411   match(Set dst (RShiftVB src shift));
16412   ins_cost(INSN_COST);
16413   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
16414   ins_encode %{
16415     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
16416             as_FloatRegister($src$$reg),
16417             as_FloatRegister($shift$$reg));
16418   %}
16419   ins_pipe(vshift64);
16420 %}
16421 
16422 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
16423   predicate(n->as_Vector()->length() == 16);
16424   match(Set dst (LShiftVB src shift));
16425   match(Set dst (RShiftVB src shift));
16426   ins_cost(INSN_COST);
16427   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
16428   ins_encode %{
16429     __ sshl(as_FloatRegister($dst$$reg), __ T16B,
16430             as_FloatRegister($src$$reg),
16431             as_FloatRegister($shift$$reg));
16432   %}
16433   ins_pipe(vshift128);
16434 %}
16435 
16436 instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
16437   predicate(n->as_Vector()->length() == 4 ||
16438             n->as_Vector()->length() == 8);
16439   match(Set dst (URShiftVB src shift));
16440   ins_cost(INSN_COST);
16441   format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
16442   ins_encode %{
16443     __ ushl(as_FloatRegister($dst$$reg), __ T8B,
16444             as_FloatRegister($src$$reg),
16445             as_FloatRegister($shift$$reg));
16446   %}
16447   ins_pipe(vshift64);
16448 %}
16449 
16450 instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
16451   predicate(n->as_Vector()->length() == 16);
16452   match(Set dst (URShiftVB src shift));
16453   ins_cost(INSN_COST);
16454   format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
16455   ins_encode %{
16456     __ ushl(as_FloatRegister($dst$$reg), __ T16B,
16457             as_FloatRegister($src$$reg),
16458             as_FloatRegister($shift$$reg));
16459   %}
16460   ins_pipe(vshift128);
16461 %}
16462 
16463 instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
16464   predicate(n->as_Vector()->length() == 4 ||
16465             n->as_Vector()->length() == 8);
16466   match(Set dst (LShiftVB src shift));
16467   ins_cost(INSN_COST);
16468   format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
16469   ins_encode %{
16470     int sh = (int)$shift$$constant & 31;
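    // a shift by >= the 8-bit element width must produce zero; shl cannot
    // encode that, so zero $dst by eor'ing $src with itself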
16471     if (sh >= 8) {
16472       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16473              as_FloatRegister($src$$reg),
16474              as_FloatRegister($src$$reg));
16475     } else {
16476       __ shl(as_FloatRegister($dst$$reg), __ T8B,
16477              as_FloatRegister($src$$reg), sh);
16478     }
16479   %}
16480   ins_pipe(vshift64_imm);
16481 %}
16482 
16483 instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
16484   predicate(n->as_Vector()->length() == 16);
16485   match(Set dst (LShiftVB src shift));
16486   ins_cost(INSN_COST);
16487   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
16488   ins_encode %{
16489     int sh = (int)$shift$$constant & 31;
16490     if (sh >= 8) {
16491       __ eor(as_FloatRegister($dst$$reg), __ T16B,
16492              as_FloatRegister($src$$reg),
16493              as_FloatRegister($src$$reg));
16494     } else {
16495       __ shl(as_FloatRegister($dst$$reg), __ T16B,
16496              as_FloatRegister($src$$reg), sh);
16497     }
16498   %}
16499   ins_pipe(vshift128_imm);
16500 %}
16501 
16502 instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
16503   predicate(n->as_Vector()->length() == 4 ||
16504             n->as_Vector()->length() == 8);
16505   match(Set dst (RShiftVB src shift));
16506   ins_cost(INSN_COST);
16507   format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
16508   ins_encode %{
16509     int sh = (int)$shift$$constant & 31;
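    // an arithmetic shift by >= the element width yields all sign bits,
    // the same as a shift by 7; the count is then negated and masked into
    // the encoded form this assembler's sshr takes for immediate shifts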
16510     if (sh >= 8) sh = 7;
16511     sh = -sh & 7;
16512     __ sshr(as_FloatRegister($dst$$reg), __ T8B,
16513            as_FloatRegister($src$$reg), sh);
16514   %}
16515   ins_pipe(vshift64_imm);
16516 %}
16517 
16518 instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
16519   predicate(n->as_Vector()->length() == 16);
16520   match(Set dst (RShiftVB src shift));
16521   ins_cost(INSN_COST);
16522   format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
16523   ins_encode %{
16524     int sh = (int)$shift$$constant & 31;
16525     if (sh >= 8) sh = 7;
16526     sh = -sh & 7;
16527     __ sshr(as_FloatRegister($dst$$reg), __ T16B,
16528            as_FloatRegister($src$$reg), sh);
16529   %}
16530   ins_pipe(vshift128_imm);
16531 %}
16532 
16533 instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
16534   predicate(n->as_Vector()->length() == 4 ||
16535             n->as_Vector()->length() == 8);
16536   match(Set dst (URShiftVB src shift));
16537   ins_cost(INSN_COST);
16538   format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
16539   ins_encode %{
16540     int sh = (int)$shift$$constant & 31;
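    // a logical right shift by >= the element width must produce zero; ushr
    // cannot encode that, so zero $dst instead; otherwise pass the count
    // negated and masked as in the arithmetic case above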
16541     if (sh >= 8) {
16542       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16543              as_FloatRegister($src$$reg),
16544              as_FloatRegister($src$$reg));
16545     } else {
16546       __ ushr(as_FloatRegister($dst$$reg), __ T8B,
16547              as_FloatRegister($src$$reg), -sh & 7);
16548     }
16549   %}
16550   ins_pipe(vshift64_imm);
16551 %}
16552 
16553 instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
16554   predicate(n->as_Vector()->length() == 16);
16555   match(Set dst (URShiftVB src shift));
16556   ins_cost(INSN_COST);
16557   format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
16558   ins_encode %{
16559     int sh = (int)$shift$$constant & 31;
16560     if (sh >= 8) {
16561       __ eor(as_FloatRegister($dst$$reg), __ T16B,
16562              as_FloatRegister($src$$reg),
16563              as_FloatRegister($src$$reg));
16564     } else {
16565       __ ushr(as_FloatRegister($dst$$reg), __ T16B,
16566              as_FloatRegister($src$$reg), -sh & 7);
16567     }
16568   %}
16569   ins_pipe(vshift128_imm);
16570 %}
16571 
16572 instruct vsll4S(vecD dst, vecD src, vecX shift) %{
16573   predicate(n->as_Vector()->length() == 2 ||
16574             n->as_Vector()->length() == 4);
16575   match(Set dst (LShiftVS src shift));
16576   match(Set dst (RShiftVS src shift));
16577   ins_cost(INSN_COST);
16578   format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
16579   ins_encode %{
16580     __ sshl(as_FloatRegister($dst$$reg), __ T4H,
16581             as_FloatRegister($src$$reg),
16582             as_FloatRegister($shift$$reg));
16583   %}
16584   ins_pipe(vshift64);
16585 %}
16586 
16587 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
16588   predicate(n->as_Vector()->length() == 8);
16589   match(Set dst (LShiftVS src shift));
16590   match(Set dst (RShiftVS src shift));
16591   ins_cost(INSN_COST);
16592   format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
16593   ins_encode %{
16594     __ sshl(as_FloatRegister($dst$$reg), __ T8H,
16595             as_FloatRegister($src$$reg),
16596             as_FloatRegister($shift$$reg));
16597   %}
16598   ins_pipe(vshift128);
16599 %}
16600 
16601 instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
16602   predicate(n->as_Vector()->length() == 2 ||
16603             n->as_Vector()->length() == 4);
16604   match(Set dst (URShiftVS src shift));
16605   ins_cost(INSN_COST);
16606   format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
16607   ins_encode %{
16608     __ ushl(as_FloatRegister($dst$$reg), __ T4H,
16609             as_FloatRegister($src$$reg),
16610             as_FloatRegister($shift$$reg));
16611   %}
16612   ins_pipe(vshift64);
16613 %}
16614 
16615 instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
16616   predicate(n->as_Vector()->length() == 8);
16617   match(Set dst (URShiftVS src shift));
16618   ins_cost(INSN_COST);
16619   format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
16620   ins_encode %{
16621     __ ushl(as_FloatRegister($dst$$reg), __ T8H,
16622             as_FloatRegister($src$$reg),
16623             as_FloatRegister($shift$$reg));
16624   %}
16625   ins_pipe(vshift128);
16626 %}
16627 
16628 instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
16629   predicate(n->as_Vector()->length() == 2 ||
16630             n->as_Vector()->length() == 4);
16631   match(Set dst (LShiftVS src shift));
16632   ins_cost(INSN_COST);
16633   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
16634   ins_encode %{
16635     int sh = (int)$shift$$constant & 31;
16636     if (sh >= 16) {
16637       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16638              as_FloatRegister($src$$reg),
16639              as_FloatRegister($src$$reg));
16640     } else {
16641       __ shl(as_FloatRegister($dst$$reg), __ T4H,
16642              as_FloatRegister($src$$reg), sh);
16643     }
16644   %}
16645   ins_pipe(vshift64_imm);
16646 %}
16647 
16648 instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
16649   predicate(n->as_Vector()->length() == 8);
16650   match(Set dst (LShiftVS src shift));
16651   ins_cost(INSN_COST);
16652   format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
16653   ins_encode %{
16654     int sh = (int)$shift$$constant & 31;
16655     if (sh >= 16) {
16656       __ eor(as_FloatRegister($dst$$reg), __ T16B,
16657              as_FloatRegister($src$$reg),
16658              as_FloatRegister($src$$reg));
16659     } else {
16660       __ shl(as_FloatRegister($dst$$reg), __ T8H,
16661              as_FloatRegister($src$$reg), sh);
16662     }
16663   %}
16664   ins_pipe(vshift128_imm);
16665 %}
16666 
16667 instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
16668   predicate(n->as_Vector()->length() == 2 ||
16669             n->as_Vector()->length() == 4);
16670   match(Set dst (RShiftVS src shift));
16671   ins_cost(INSN_COST);
16672   format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
16673   ins_encode %{
16674     int sh = (int)$shift$$constant & 31;
16675     if (sh >= 16) sh = 15;
16676     sh = -sh & 15;
16677     __ sshr(as_FloatRegister($dst$$reg), __ T4H,
16678            as_FloatRegister($src$$reg), sh);
16679   %}
16680   ins_pipe(vshift64_imm);
16681 %}
16682 
16683 instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
16684   predicate(n->as_Vector()->length() == 8);
16685   match(Set dst (RShiftVS src shift));
16686   ins_cost(INSN_COST);
16687   format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
16688   ins_encode %{
16689     int sh = (int)$shift$$constant & 31;
16690     if (sh >= 16) sh = 15;
16691     sh = -sh & 15;
16692     __ sshr(as_FloatRegister($dst$$reg), __ T8H,
16693            as_FloatRegister($src$$reg), sh);
16694   %}
16695   ins_pipe(vshift128_imm);
16696 %}
16697 
16698 instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
16699   predicate(n->as_Vector()->length() == 2 ||
16700             n->as_Vector()->length() == 4);
16701   match(Set dst (URShiftVS src shift));
16702   ins_cost(INSN_COST);
16703   format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
16704   ins_encode %{
16705     int sh = (int)$shift$$constant & 31;
16706     if (sh >= 16) {
16707       __ eor(as_FloatRegister($dst$$reg), __ T8B,
16708              as_FloatRegister($src$$reg),
16709              as_FloatRegister($src$$reg));
16710     } else {
16711       __ ushr(as_FloatRegister($dst$$reg), __ T4H,
16712              as_FloatRegister($src$$reg), -sh & 15);
16713     }
16714   %}
16715   ins_pipe(vshift64_imm);
16716 %}
16717 
16718 instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
16719   predicate(n->as_Vector()->length() == 8);
16720   match(Set dst (URShiftVS src shift));
16721   ins_cost(INSN_COST);
16722   format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
16723   ins_encode %{
16724     int sh = (int)$shift$$constant & 31;
16725     if (sh >= 16) {
16726       __ eor(as_FloatRegister($dst$$reg), __ T16B,
16727              as_FloatRegister($src$$reg),
16728              as_FloatRegister($src$$reg));
16729     } else {
16730       __ ushr(as_FloatRegister($dst$$reg), __ T8H,
16731              as_FloatRegister($src$$reg), -sh & 15);
16732     }
16733   %}
16734   ins_pipe(vshift128_imm);
16735 %}
16736 
16737 instruct vsll2I(vecD dst, vecD src, vecX shift) %{
16738   predicate(n->as_Vector()->length() == 2);
16739   match(Set dst (LShiftVI src shift));
16740   match(Set dst (RShiftVI src shift));
16741   ins_cost(INSN_COST);
16742   format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
16743   ins_encode %{
16744     __ sshl(as_FloatRegister($dst$$reg), __ T2S,
16745             as_FloatRegister($src$$reg),
16746             as_FloatRegister($shift$$reg));
16747   %}
16748   ins_pipe(vshift64);
16749 %}
16750 
16751 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
16752   predicate(n->as_Vector()->length() == 4);
16753   match(Set dst (LShiftVI src shift));
16754   match(Set dst (RShiftVI src shift));
16755   ins_cost(INSN_COST);
16756   format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
16757   ins_encode %{
16758     __ sshl(as_FloatRegister($dst$$reg), __ T4S,
16759             as_FloatRegister($src$$reg),
16760             as_FloatRegister($shift$$reg));
16761   %}
16762   ins_pipe(vshift128);
16763 %}
16764 
16765 instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
16766   predicate(n->as_Vector()->length() == 2);
16767   match(Set dst (URShiftVI src shift));
16768   ins_cost(INSN_COST);
16769   format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
16770   ins_encode %{
16771     __ ushl(as_FloatRegister($dst$$reg), __ T2S,
16772             as_FloatRegister($src$$reg),
16773             as_FloatRegister($shift$$reg));
16774   %}
16775   ins_pipe(vshift64);
16776 %}
16777 
16778 instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
16779   predicate(n->as_Vector()->length() == 4);
16780   match(Set dst (URShiftVI src shift));
16781   ins_cost(INSN_COST);
16782   format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
16783   ins_encode %{
16784     __ ushl(as_FloatRegister($dst$$reg), __ T4S,
16785             as_FloatRegister($src$$reg),
16786             as_FloatRegister($shift$$reg));
16787   %}
16788   ins_pipe(vshift128);
16789 %}
16790 
16791 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
16792   predicate(n->as_Vector()->length() == 2);
16793   match(Set dst (LShiftVI src shift));
16794   ins_cost(INSN_COST);
16795   format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
16796   ins_encode %{
16797     __ shl(as_FloatRegister($dst$$reg), __ T2S,
16798            as_FloatRegister($src$$reg),
16799            (int)$shift$$constant & 31);
16800   %}
16801   ins_pipe(vshift64_imm);
16802 %}
16803 
16804 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
16805   predicate(n->as_Vector()->length() == 4);
16806   match(Set dst (LShiftVI src shift));
16807   ins_cost(INSN_COST);
16808   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
16809   ins_encode %{
16810     __ shl(as_FloatRegister($dst$$reg), __ T4S,
16811            as_FloatRegister($src$$reg),
16812            (int)$shift$$constant & 31);
16813   %}
16814   ins_pipe(vshift128_imm);
16815 %}
16816 
16817 instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
16818   predicate(n->as_Vector()->length() == 2);
16819   match(Set dst (RShiftVI src shift));
16820   ins_cost(INSN_COST);
16821   format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
16822   ins_encode %{
16823     __ sshr(as_FloatRegister($dst$$reg), __ T2S,
16824             as_FloatRegister($src$$reg),
16825             -(int)$shift$$constant & 31);
16826   %}
16827   ins_pipe(vshift64_imm);
16828 %}
16829 
16830 instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
16831   predicate(n->as_Vector()->length() == 4);
16832   match(Set dst (RShiftVI src shift));
16833   ins_cost(INSN_COST);
16834   format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
16835   ins_encode %{
16836     __ sshr(as_FloatRegister($dst$$reg), __ T4S,
16837             as_FloatRegister($src$$reg),
16838             -(int)$shift$$constant & 31);
16839   %}
16840   ins_pipe(vshift128_imm);
16841 %}
16842 
16843 instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
16844   predicate(n->as_Vector()->length() == 2);
16845   match(Set dst (URShiftVI src shift));
16846   ins_cost(INSN_COST);
16847   format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
16848   ins_encode %{
16849     __ ushr(as_FloatRegister($dst$$reg), __ T2S,
16850             as_FloatRegister($src$$reg),
16851             -(int)$shift$$constant & 31);
16852   %}
16853   ins_pipe(vshift64_imm);
16854 %}
16855 
16856 instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
16857   predicate(n->as_Vector()->length() == 4);
16858   match(Set dst (URShiftVI src shift));
16859   ins_cost(INSN_COST);
16860   format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
16861   ins_encode %{
16862     __ ushr(as_FloatRegister($dst$$reg), __ T4S,
16863             as_FloatRegister($src$$reg),
16864             -(int)$shift$$constant & 31);
16865   %}
16866   ins_pipe(vshift128_imm);
16867 %}
16868 
16869 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
16870   predicate(n->as_Vector()->length() == 2);
16871   match(Set dst (LShiftVL src shift));
16872   match(Set dst (RShiftVL src shift));
16873   ins_cost(INSN_COST);
16874   format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
16875   ins_encode %{
16876     __ sshl(as_FloatRegister($dst$$reg), __ T2D,
16877             as_FloatRegister($src$$reg),
16878             as_FloatRegister($shift$$reg));
16879   %}
16880   ins_pipe(vshift128);
16881 %}
16882 
16883 instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
16884   predicate(n->as_Vector()->length() == 2);
16885   match(Set dst (URShiftVL src shift));
16886   ins_cost(INSN_COST);
16887   format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
16888   ins_encode %{
16889     __ ushl(as_FloatRegister($dst$$reg), __ T2D,
16890             as_FloatRegister($src$$reg),
16891             as_FloatRegister($shift$$reg));
16892   %}
16893   ins_pipe(vshift128);
16894 %}
16895 
16896 instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
16897   predicate(n->as_Vector()->length() == 2);
16898   match(Set dst (LShiftVL src shift));
16899   ins_cost(INSN_COST);
16900   format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
16901   ins_encode %{
16902     __ shl(as_FloatRegister($dst$$reg), __ T2D,
16903            as_FloatRegister($src$$reg),
16904            (int)$shift$$constant & 63);
16905   %}
16906   ins_pipe(vshift128_imm);
16907 %}
16908 
16909 instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
16910   predicate(n->as_Vector()->length() == 2);
16911   match(Set dst (RShiftVL src shift));
16912   ins_cost(INSN_COST);
16913   format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
16914   ins_encode %{
16915     __ sshr(as_FloatRegister($dst$$reg), __ T2D,
16916             as_FloatRegister($src$$reg),
16917             -(int)$shift$$constant & 63);
16918   %}
16919   ins_pipe(vshift128_imm);
16920 %}
16921 
16922 instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
16923   predicate(n->as_Vector()->length() == 2);
16924   match(Set dst (URShiftVL src shift));
16925   ins_cost(INSN_COST);
16926   format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
16927   ins_encode %{
16928     __ ushr(as_FloatRegister($dst$$reg), __ T2D,
16929             as_FloatRegister($src$$reg),
16930             -(int)$shift$$constant & 63);
16931   %}
16932   ins_pipe(vshift128_imm);
16933 %}
16934 
16935 //----------PEEPHOLE RULES-----------------------------------------------------
16936 // These must follow all instruction definitions as they use the names
// defined in the instruction definitions.
16938 //
16939 // peepmatch ( root_instr_name [preceding_instruction]* );
16940 //
16941 // peepconstraint %{
16942 // (instruction_number.operand_name relational_op instruction_number.operand_name
16943 //  [, ...] );
16944 // // instruction numbers are zero-based using left to right order in peepmatch
16945 //
16946 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16947 // // provide an instruction_number.operand_name for each operand that appears
16948 // // in the replacement instruction's match rule
16949 //
16950 // ---------VM FLAGS---------------------------------------------------------
16951 //
16952 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16953 //
16954 // Each peephole rule is given an identifying number starting with zero and
16955 // increasing by one in the order seen by the parser.  An individual peephole
16956 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16957 // on the command-line.
16958 //
16959 // ---------CURRENT LIMITATIONS----------------------------------------------
16960 //
16961 // Only match adjacent instructions in same basic block
16962 // Only equality constraints
16963 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16964 // Only one replacement instruction
16965 //
16966 // ---------EXAMPLE----------------------------------------------------------
16967 //
16968 // // pertinent parts of existing instructions in architecture description
16969 // instruct movI(iRegINoSp dst, iRegI src)
16970 // %{
16971 //   match(Set dst (CopyI src));
16972 // %}
16973 //
16974 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16975 // %{
16976 //   match(Set dst (AddI dst src));
16977 //   effect(KILL cr);
16978 // %}
16979 //
16980 // // Change (inc mov) to lea
16981 // peephole %{
//   // increment preceded by register-register move
16983 //   peepmatch ( incI_iReg movI );
16984 //   // require that the destination register of the increment
16985 //   // match the destination register of the move
16986 //   peepconstraint ( 0.dst == 1.dst );
16987 //   // construct a replacement instruction that sets
16988 //   // the destination to ( move's source register + one )
16989 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16990 // %}
16991 //
16992 
// This implementation no longer uses movX instructions, since the
// machine-independent system no longer uses CopyX nodes.
16995 //
16996 // peephole
16997 // %{
16998 //   peepmatch (incI_iReg movI);
16999 //   peepconstraint (0.dst == 1.dst);
17000 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17001 // %}
17002 
17003 // peephole
17004 // %{
17005 //   peepmatch (decI_iReg movI);
17006 //   peepconstraint (0.dst == 1.dst);
17007 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17008 // %}
17009 
17010 // peephole
17011 // %{
17012 //   peepmatch (addI_iReg_imm movI);
17013 //   peepconstraint (0.dst == 1.dst);
17014 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17015 // %}
17016 
17017 // peephole
17018 // %{
17019 //   peepmatch (incL_iReg movL);
17020 //   peepconstraint (0.dst == 1.dst);
17021 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17022 // %}
17023 
17024 // peephole
17025 // %{
17026 //   peepmatch (decL_iReg movL);
17027 //   peepconstraint (0.dst == 1.dst);
17028 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17029 // %}
17030 
17031 // peephole
17032 // %{
17033 //   peepmatch (addL_iReg_imm movL);
17034 //   peepconstraint (0.dst == 1.dst);
17035 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17036 // %}
17037 
17038 // peephole
17039 // %{
17040 //   peepmatch (addP_iReg_imm movP);
17041 //   peepconstraint (0.dst == 1.dst);
17042 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17043 // %}
17044 
17045 // // Change load of spilled value to only a spill
17046 // instruct storeI(memory mem, iRegI src)
17047 // %{
17048 //   match(Set mem (StoreI mem src));
17049 // %}
17050 //
17051 // instruct loadI(iRegINoSp dst, memory mem)
17052 // %{
17053 //   match(Set dst (LoadI mem));
17054 // %}
17055 //
17056 
17057 //----------SMARTSPILL RULES---------------------------------------------------
17058 // These must follow all instruction definitions as they use the names
// defined in the instruction definitions.
17060 
17061 // Local Variables:
17062 // mode: c++
17063 // End: