rev 10586 : 8153837: aarch64: handle special cases for MaxINode & MinINode
Summary: aarch64: handle special cases for MaxINode & MinINode
Reviewed-by: duke

   1 //
   2 // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
   31 // architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
  160 // for Java use, float registers v0-v15 are always save-on-call (whereas
  161 // the platform ABI treats v8-v15 as callee save). Float registers
  162 // v16-v31 are SOC as per the platform spec.
 163 
// Each 128-bit SIMD/FP register is described to the allocator as four 32-bit
// slices: Vn (bits 0-31), Vn_H (32-63), Vn_J (64-95), Vn_K (96-127), obtained
// via as_VMReg(), ->next(), ->next(2) and ->next(3) respectively.
  164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
  168 
  169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
  173 
  174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
  178 
  179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
  183 
  184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
  188 
  189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
  193 
  194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
  198 
  199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
  203 
  204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
  208 
  209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
  213 
  214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
  218 
  219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
  223 
  224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
  228 
  229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
  233 
  234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
  238 
  239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
  243 
  244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
  248 
  249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
  253 
  254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
  258 
  259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
  263 
  264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
  268 
  269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
  273 
  274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
  278 
  279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
  283 
  284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
  288 
  289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
  293 
  294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
  298 
  299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
  303 
  304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
  308 
  309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
  313 
  314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
  318 
  319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
  328 // the AArch64 CPSR status flag register (the PSTATE condition flags,
  329 // NZCV) is not directly accessible as an instruction operand. the FPSR
  330 // status flag register is a system register which can be written/read
  331 // using MSR/MRS but again does not appear as an operand (a code
  332 // identifying the FPSR occurs as an immediate value in the instruction).
  333 
  334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad()); // encoding 32; no backing VMReg
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// chunk0: the 64 bit general registers, listed in allocation-priority order
// (volatile temporaries first, then Java argument registers, then the
// C-callee-saved set, then the non-allocatable system registers).
  345 alloc_class chunk0(
  346     // volatiles
  347     R10, R10_H,
  348     R11, R11_H,
  349     R12, R12_H,
  350     R13, R13_H,
  351     R14, R14_H,
  352     R15, R15_H,
  353     R16, R16_H,
  354     R17, R17_H,
  355     R18, R18_H,
  356 
  357     // arg registers
  358     R0, R0_H,
  359     R1, R1_H,
  360     R2, R2_H,
  361     R3, R3_H,
  362     R4, R4_H,
  363     R5, R5_H,
  364     R6, R6_H,
  365     R7, R7_H,
  366 
  367     // non-volatiles
  368     R19, R19_H,
  369     R20, R20_H,
  370     R21, R21_H,
  371     R22, R22_H,
  372     R23, R23_H,
  373     R24, R24_H,
  374     R25, R25_H,
  375     R26, R26_H,
  376 
  377     // non-allocatable registers
  378 
  379     R27, R27_H, // heapbase
  380     R28, R28_H, // thread
  381     R29, R29_H, // fp
  382     R30, R30_H, // lr
  383     R31, R31_H, // sp
  384 );
 385 
// chunk1: the SIMD/FP registers, listed in allocation-priority order
// (v16-v31 first, then the FP argument registers v0-v7, then v8-v15 which
// are callee-saved under the platform ABI -- see the note above reg_def V0).
  386 alloc_class chunk1(
  387 
  388     // no save
  389     V16, V16_H, V16_J, V16_K,
  390     V17, V17_H, V17_J, V17_K,
  391     V18, V18_H, V18_J, V18_K,
  392     V19, V19_H, V19_J, V19_K,
  393     V20, V20_H, V20_J, V20_K,
  394     V21, V21_H, V21_J, V21_K,
  395     V22, V22_H, V22_J, V22_K,
  396     V23, V23_H, V23_J, V23_K,
  397     V24, V24_H, V24_J, V24_K,
  398     V25, V25_H, V25_J, V25_K,
  399     V26, V26_H, V26_J, V26_K,
  400     V27, V27_H, V27_J, V27_K,
  401     V28, V28_H, V28_J, V28_K,
  402     V29, V29_H, V29_J, V29_K,
  403     V30, V30_H, V30_J, V30_K,
  404     V31, V31_H, V31_J, V31_K,
  405 
  406     // arg registers
  407     V0, V0_H, V0_J, V0_K,
  408     V1, V1_H, V1_J, V1_K,
  409     V2, V2_H, V2_J, V2_K,
  410     V3, V3_H, V3_J, V3_K,
  411     V4, V4_H, V4_J, V4_K,
  412     V5, V5_H, V5_J, V5_K,
  413     V6, V6_H, V6_J, V6_K,
  414     V7, V7_H, V7_J, V7_K,
  415 
  416     // non-volatiles
  417     V8, V8_H, V8_J, V8_K,
  418     V9, V9_H, V9_J, V9_K,
  419     V10, V10_H, V10_J, V10_K,
  420     V11, V11_H, V11_J, V11_K,
  421     V12, V12_H, V12_J, V12_K,
  422     V13, V13_H, V13_J, V13_K,
  423     V14, V14_H, V14_J, V14_K,
  424     V15, V15_H, V15_J, V15_K,
  425 );
 426 
// chunk2: the flags register only.
  427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
 432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 434 // 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 435 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
  438 // Class for all 32 bit integer registers -- excludes SP which will
  439 // never be used as an integer register (R29/fp and R30/lr are included)
  440 reg_class any_reg32(
  441     R0,
  442     R1,
  443     R2,
  444     R3,
  445     R4,
  446     R5,
  447     R6,
  448     R7,
  449     R10,
  450     R11,
  451     R12,
  452     R13,
  453     R14,
  454     R15,
  455     R16,
  456     R17,
  457     R18,
  458     R19,
  459     R20,
  460     R21,
  461     R22,
  462     R23,
  463     R24,
  464     R25,
  465     R26,
  466     R27,
  467     R28,
  468     R29,
  469     R30
  470 );
 471 
  472 // Singleton class for R0 int register (forces allocation to r0)
  473 reg_class int_r0_reg(R0);
  474 
  475 // Singleton class for R2 int register (forces allocation to r2)
  476 reg_class int_r2_reg(R2);
  477 
  478 // Singleton class for R3 int register (forces allocation to r3)
  479 reg_class int_r3_reg(R3);
  480 
  481 // Singleton class for R4 int register (forces allocation to r4)
  482 reg_class int_r4_reg(R4);
 483 
  484 // Class for all long integer registers (including SP)
  485 reg_class any_reg(
  486     R0, R0_H,
  487     R1, R1_H,
  488     R2, R2_H,
  489     R3, R3_H,
  490     R4, R4_H,
  491     R5, R5_H,
  492     R6, R6_H,
  493     R7, R7_H,
  494     R10, R10_H,
  495     R11, R11_H,
  496     R12, R12_H,
  497     R13, R13_H,
  498     R14, R14_H,
  499     R15, R15_H,
  500     R16, R16_H,
  501     R17, R17_H,
  502     R18, R18_H,
  503     R19, R19_H,
  504     R20, R20_H,
  505     R21, R21_H,
  506     R22, R22_H,
  507     R23, R23_H,
  508     R24, R24_H,
  509     R25, R25_H,
  510     R26, R26_H,
  511     R27, R27_H,
  512     R28, R28_H,
  513     R29, R29_H,
  514     R30, R30_H,
  515     R31, R31_H
  516 );
 517 
  518 // Class for all non-special integer registers
// NOTE(review): this variant is currently identical to
// no_special_reg32_with_fp below -- R29 (fp) is excluded in both, so the
// reg_class_dynamic selection on PreserveFramePointer is a no-op here.
  519 reg_class no_special_reg32_no_fp(
  520     R0,
  521     R1,
  522     R2,
  523     R3,
  524     R4,
  525     R5,
  526     R6,
  527     R7,
  528     R10,
  529     R11,
  530     R12,                        // rmethod
  531     R13,
  532     R14,
  533     R15,
  534     R16,
  535     R17,
  536     R18,
  537     R19,
  538     R20,
  539     R21,
  540     R22,
  541     R23,
  542     R24,
  543     R25,
  544     R26
  545  /* R27, */                     // heapbase
  546  /* R28, */                     // thread
  547  /* R29, */                     // fp
  548  /* R30, */                     // lr
  549  /* R31 */                      // sp
  550 );
 551 
// Non-special 32-bit integer registers, PreserveFramePointer variant.
// NOTE(review): currently identical to no_special_reg32_no_fp above.
  552 reg_class no_special_reg32_with_fp(
  553     R0,
  554     R1,
  555     R2,
  556     R3,
  557     R4,
  558     R5,
  559     R6,
  560     R7,
  561     R10,
  562     R11,
  563     R12,                        // rmethod
  564     R13,
  565     R14,
  566     R15,
  567     R16,
  568     R17,
  569     R18,
  570     R19,
  571     R20,
  572     R21,
  573     R22,
  574     R23,
  575     R24,
  576     R25,
  577     R26
  578  /* R27, */                     // heapbase
  579  /* R28, */                     // thread
  580  /* R29, */                     // fp
  581  /* R30, */                     // lr
  582  /* R31 */                      // sp
  583 );
 584 
// Select between the two variants above based on PreserveFramePointer.
  585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
  587 // Class for all non-special long integer registers
// NOTE(review): this variant is currently identical to
// no_special_reg_with_fp below -- R29 (fp) is excluded in both.
  588 reg_class no_special_reg_no_fp(
  589     R0, R0_H,
  590     R1, R1_H,
  591     R2, R2_H,
  592     R3, R3_H,
  593     R4, R4_H,
  594     R5, R5_H,
  595     R6, R6_H,
  596     R7, R7_H,
  597     R10, R10_H,
  598     R11, R11_H,
  599     R12, R12_H,                 // rmethod
  600     R13, R13_H,
  601     R14, R14_H,
  602     R15, R15_H,
  603     R16, R16_H,
  604     R17, R17_H,
  605     R18, R18_H,
  606     R19, R19_H,
  607     R20, R20_H,
  608     R21, R21_H,
  609     R22, R22_H,
  610     R23, R23_H,
  611     R24, R24_H,
  612     R25, R25_H,
  613     R26, R26_H,
  614  /* R27, R27_H, */              // heapbase
  615  /* R28, R28_H, */              // thread
  616  /* R29, R29_H, */              // fp
  617  /* R30, R30_H, */              // lr
  618  /* R31, R31_H */               // sp
  619 );
 620 
// Non-special long integer registers, PreserveFramePointer variant.
// NOTE(review): currently identical to no_special_reg_no_fp above.
  621 reg_class no_special_reg_with_fp(
  622     R0, R0_H,
  623     R1, R1_H,
  624     R2, R2_H,
  625     R3, R3_H,
  626     R4, R4_H,
  627     R5, R5_H,
  628     R6, R6_H,
  629     R7, R7_H,
  630     R10, R10_H,
  631     R11, R11_H,
  632     R12, R12_H,                 // rmethod
  633     R13, R13_H,
  634     R14, R14_H,
  635     R15, R15_H,
  636     R16, R16_H,
  637     R17, R17_H,
  638     R18, R18_H,
  639     R19, R19_H,
  640     R20, R20_H,
  641     R21, R21_H,
  642     R22, R22_H,
  643     R23, R23_H,
  644     R24, R24_H,
  645     R25, R25_H,
  646     R26, R26_H,
  647  /* R27, R27_H, */              // heapbase
  648  /* R28, R28_H, */              // thread
  649  /* R29, R29_H, */              // fp
  650  /* R30, R30_H, */              // lr
  651  /* R31, R31_H */               // sp
  652 );
 653 
// Select between the two variants above based on PreserveFramePointer.
  654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
  656 // Class for 64 bit register r0
  657 reg_class r0_reg(
  658     R0, R0_H
  659 );
  660 
  661 // Class for 64 bit register r1
  662 reg_class r1_reg(
  663     R1, R1_H
  664 );
  665 
  666 // Class for 64 bit register r2
  667 reg_class r2_reg(
  668     R2, R2_H
  669 );
  670 
  671 // Class for 64 bit register r3
  672 reg_class r3_reg(
  673     R3, R3_H
  674 );
  675 
  676 // Class for 64 bit register r4
  677 reg_class r4_reg(
  678     R4, R4_H
  679 );
  680 
  681 // Class for 64 bit register r5
  682 reg_class r5_reg(
  683     R5, R5_H
  684 );
  685 
  686 // Class for 64 bit register r10
  687 reg_class r10_reg(
  688     R10, R10_H
  689 );
  690 
  691 // Class for 64 bit register r11
  692 reg_class r11_reg(
  693     R11, R11_H
  694 );
  695 
  696 // Class for method register (rmethod = r12)
  697 reg_class method_reg(
  698     R12, R12_H
  699 );
  700 
  701 // Class for heapbase register (r27)
  702 reg_class heapbase_reg(
  703     R27, R27_H
  704 );
  705 
  706 // Class for thread register (r28)
  707 reg_class thread_reg(
  708     R28, R28_H
  709 );
  710 
  711 // Class for frame pointer register (r29)
  712 reg_class fp_reg(
  713     R29, R29_H
  714 );
  715 
  716 // Class for link register (r30)
  717 reg_class lr_reg(
  718     R30, R30_H
  719 );
  720 
  721 // Class for long sp register (r31)
  722 reg_class sp_reg(
  723   R31, R31_H
  724 );
 725 
  726 // Class for all pointer registers (including fp, lr and sp)
  727 reg_class ptr_reg(
  728     R0, R0_H,
  729     R1, R1_H,
  730     R2, R2_H,
  731     R3, R3_H,
  732     R4, R4_H,
  733     R5, R5_H,
  734     R6, R6_H,
  735     R7, R7_H,
  736     R10, R10_H,
  737     R11, R11_H,
  738     R12, R12_H,
  739     R13, R13_H,
  740     R14, R14_H,
  741     R15, R15_H,
  742     R16, R16_H,
  743     R17, R17_H,
  744     R18, R18_H,
  745     R19, R19_H,
  746     R20, R20_H,
  747     R21, R21_H,
  748     R22, R22_H,
  749     R23, R23_H,
  750     R24, R24_H,
  751     R25, R25_H,
  752     R26, R26_H,
  753     R27, R27_H,
  754     R28, R28_H,
  755     R29, R29_H,
  756     R30, R30_H,
  757     R31, R31_H
  758 );
 759 
  760 // Class for all non_special pointer registers (heapbase, thread, fp, lr and sp excluded)
  761 reg_class no_special_ptr_reg(
  762     R0, R0_H,
  763     R1, R1_H,
  764     R2, R2_H,
  765     R3, R3_H,
  766     R4, R4_H,
  767     R5, R5_H,
  768     R6, R6_H,
  769     R7, R7_H,
  770     R10, R10_H,
  771     R11, R11_H,
  772     R12, R12_H,
  773     R13, R13_H,
  774     R14, R14_H,
  775     R15, R15_H,
  776     R16, R16_H,
  777     R17, R17_H,
  778     R18, R18_H,
  779     R19, R19_H,
  780     R20, R20_H,
  781     R21, R21_H,
  782     R22, R22_H,
  783     R23, R23_H,
  784     R24, R24_H,
  785     R25, R25_H,
  786     R26, R26_H,
  787  /* R27, R27_H, */              // heapbase
  788  /* R28, R28_H, */              // thread
  789  /* R29, R29_H, */              // fp
  790  /* R30, R30_H, */              // lr
  791  /* R31, R31_H */               // sp
  792 );
 793 
  794 // Class for all float registers (single precision: the low 32-bit slice of each V register)
  795 reg_class float_reg(
  796     V0,
  797     V1,
  798     V2,
  799     V3,
  800     V4,
  801     V5,
  802     V6,
  803     V7,
  804     V8,
  805     V9,
  806     V10,
  807     V11,
  808     V12,
  809     V13,
  810     V14,
  811     V15,
  812     V16,
  813     V17,
  814     V18,
  815     V19,
  816     V20,
  817     V21,
  818     V22,
  819     V23,
  820     V24,
  821     V25,
  822     V26,
  823     V27,
  824     V28,
  825     V29,
  826     V30,
  827     V31
  828 );
 829 
  830 // Double precision float registers have virtual `high halves' that
  831 // are needed by the allocator.
  832 // Class for all double registers (each Vn paired with its virtual high half Vn_H)
  833 reg_class double_reg(
  834     V0, V0_H,
  835     V1, V1_H,
  836     V2, V2_H,
  837     V3, V3_H,
  838     V4, V4_H,
  839     V5, V5_H,
  840     V6, V6_H,
  841     V7, V7_H,
  842     V8, V8_H,
  843     V9, V9_H,
  844     V10, V10_H,
  845     V11, V11_H,
  846     V12, V12_H,
  847     V13, V13_H,
  848     V14, V14_H,
  849     V15, V15_H,
  850     V16, V16_H,
  851     V17, V17_H,
  852     V18, V18_H,
  853     V19, V19_H,
  854     V20, V20_H,
  855     V21, V21_H,
  856     V22, V22_H,
  857     V23, V23_H,
  858     V24, V24_H,
  859     V25, V25_H,
  860     V26, V26_H,
  861     V27, V27_H,
  862     V28, V28_H,
  863     V29, V29_H,
  864     V30, V30_H,
  865     V31, V31_H
  866 );
 867 
  868 // Class for all 64 bit vector registers (two 32-bit slices per V register)
  869 reg_class vectord_reg(
  870     V0, V0_H,
  871     V1, V1_H,
  872     V2, V2_H,
  873     V3, V3_H,
  874     V4, V4_H,
  875     V5, V5_H,
  876     V6, V6_H,
  877     V7, V7_H,
  878     V8, V8_H,
  879     V9, V9_H,
  880     V10, V10_H,
  881     V11, V11_H,
  882     V12, V12_H,
  883     V13, V13_H,
  884     V14, V14_H,
  885     V15, V15_H,
  886     V16, V16_H,
  887     V17, V17_H,
  888     V18, V18_H,
  889     V19, V19_H,
  890     V20, V20_H,
  891     V21, V21_H,
  892     V22, V22_H,
  893     V23, V23_H,
  894     V24, V24_H,
  895     V25, V25_H,
  896     V26, V26_H,
  897     V27, V27_H,
  898     V28, V28_H,
  899     V29, V29_H,
  900     V30, V30_H,
  901     V31, V31_H
  902 );
 903 
// Class for all 128bit vector registers
//
// A 128-bit vector occupies four 32-bit allocator slots per register:
// (Vn, Vn_H, Vn_J, Vn_K).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// n.b. only the lower two 32-bit slots (V0, V0_H) are listed; the
// _J/_K quarters used by vectorx_reg are not included.
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// n.b. only the lower two 32-bit slots (V1, V1_H) are listed; the
// _J/_K quarters used by vectorx_reg are not included.
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// n.b. only the lower two 32-bit slots (V2, V2_H) are listed; the
// _J/_K quarters used by vectorx_reg are not included.
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// n.b. only the lower two 32-bit slots (V3, V3_H) are listed; the
// _J/_K quarters used by vectorx_reg are not included.
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes
// (the single flags register consumed by compare/branch rules)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
// we follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. the first two have a low as well as a
// normal cost. huge cost appears to be a way of saying don't do
// something

definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice as much as a plain register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are an order of magnitude more
  // expensive than a register op (see the volatile get/put discussion
  // in the source block below).
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // AArch64 emits no call trampolines, so the stub occupies no space.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  // Zero, matching size_call_trampoline() above.
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
class HandlerImpl {

 public:

  // Emit the exception/deopt handler code into cbuf, returning the
  // handler's offset (defined elsewhere in this file's source block).
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): 4 words total — presumably 1 for the adr plus up
    // to 3 for a far branch; confirm against far_branch_size().
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers
  // (definitions appear in the source block below)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  // card-mark and CAS recognition predicates
  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // helpers linking the leading membar of a volatile put/CAS subgraph
  // to its trailing membar (and vice versa)
  MemBarNode *leading_to_trailing(MemBarNode *leading);
  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1064 %}
1065 
1066 source %{
1067 
  // Optimization of volatile gets and puts
1069   // -------------------------------------
1070   //
1071   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1072   // use to implement volatile reads and writes. For a volatile read
1073   // we simply need
1074   //
1075   //   ldar<x>
1076   //
1077   // and for a volatile write we need
1078   //
1079   //   stlr<x>
1080   //
1081   // Alternatively, we can implement them by pairing a normal
1082   // load/store with a memory barrier. For a volatile read we need
1083   //
1084   //   ldr<x>
1085   //   dmb ishld
1086   //
1087   // for a volatile write
1088   //
1089   //   dmb ish
1090   //   str<x>
1091   //   dmb ish
1092   //
1093   // We can also use ldaxr and stlxr to implement compare and swap CAS
1094   // sequences. These are normally translated to an instruction
1095   // sequence like the following
1096   //
1097   //   dmb      ish
1098   // retry:
1099   //   ldxr<x>   rval raddr
1100   //   cmp       rval rold
1101   //   b.ne done
1102   //   stlxr<x>  rval, rnew, rold
1103   //   cbnz      rval retry
1104   // done:
1105   //   cset      r0, eq
1106   //   dmb ishld
1107   //
1108   // Note that the exclusive store is already using an stlxr
1109   // instruction. That is required to ensure visibility to other
1110   // threads of the exclusive write (assuming it succeeds) before that
1111   // of any subsequent writes.
1112   //
1113   // The following instruction sequence is an improvement on the above
1114   //
1115   // retry:
1116   //   ldaxr<x>  rval raddr
1117   //   cmp       rval rold
1118   //   b.ne done
1119   //   stlxr<x>  rval, rnew, rold
1120   //   cbnz      rval retry
1121   // done:
1122   //   cset      r0, eq
1123   //
1124   // We don't need the leading dmb ish since the stlxr guarantees
1125   // visibility of prior writes in the case that the swap is
1126   // successful. Crucially we don't have to worry about the case where
1127   // the swap is not successful since no valid program should be
1128   // relying on visibility of prior changes by the attempting thread
1129   // in the case where the CAS fails.
1130   //
1131   // Similarly, we don't need the trailing dmb ishld if we substitute
1132   // an ldaxr instruction since that will provide all the guarantees we
1133   // require regarding observation of changes made by other threads
1134   // before any change to the CAS address observed by the load.
1135   //
1136   // In order to generate the desired instruction sequence we need to
1137   // be able to identify specific 'signature' ideal graph node
1138   // sequences which i) occur as a translation of a volatile reads or
1139   // writes or CAS operations and ii) do not occur through any other
1140   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1142   // sequences to the desired machine code sequences. Selection of the
1143   // alternative rules can be implemented by predicates which identify
1144   // the relevant node sequences.
1145   //
1146   // The ideal graph generator translates a volatile read to the node
1147   // sequence
1148   //
1149   //   LoadX[mo_acquire]
1150   //   MemBarAcquire
1151   //
1152   // As a special case when using the compressed oops optimization we
1153   // may also see this variant
1154   //
1155   //   LoadN[mo_acquire]
1156   //   DecodeN
1157   //   MemBarAcquire
1158   //
1159   // A volatile write is translated to the node sequence
1160   //
1161   //   MemBarRelease
1162   //   StoreX[mo_release] {CardMark}-optional
1163   //   MemBarVolatile
1164   //
1165   // n.b. the above node patterns are generated with a strict
1166   // 'signature' configuration of input and output dependencies (see
1167   // the predicates below for exact details). The card mark may be as
1168   // simple as a few extra nodes or, in a few GC configurations, may
1169   // include more complex control flow between the leading and
1170   // trailing memory barriers. However, whatever the card mark
1171   // configuration these signatures are unique to translated volatile
1172   // reads/stores -- they will not appear as a result of any other
1173   // bytecode translation or inlining nor as a consequence of
1174   // optimizing transforms.
1175   //
1176   // We also want to catch inlined unsafe volatile gets and puts and
1177   // be able to implement them using either ldar<x>/stlr<x> or some
1178   // combination of ldr<x>/stlr<x> and dmb instructions.
1179   //
1180   // Inlined unsafe volatiles puts manifest as a minor variant of the
1181   // normal volatile put node sequence containing an extra cpuorder
1182   // membar
1183   //
1184   //   MemBarRelease
1185   //   MemBarCPUOrder
1186   //   StoreX[mo_release] {CardMark}-optional
1187   //   MemBarVolatile
1188   //
1189   // n.b. as an aside, the cpuorder membar is not itself subject to
1190   // matching and translation by adlc rules.  However, the rule
1191   // predicates need to detect its presence in order to correctly
1192   // select the desired adlc rules.
1193   //
1194   // Inlined unsafe volatile gets manifest as a somewhat different
1195   // node sequence to a normal volatile get
1196   //
1197   //   MemBarCPUOrder
1198   //        ||       \\
1199   //   MemBarAcquire LoadX[mo_acquire]
1200   //        ||
1201   //   MemBarCPUOrder
1202   //
1203   // In this case the acquire membar does not directly depend on the
1204   // load. However, we can be sure that the load is generated from an
1205   // inlined unsafe volatile get if we see it dependent on this unique
1206   // sequence of membar nodes. Similarly, given an acquire membar we
1207   // can know that it was added because of an inlined unsafe volatile
1208   // get if it is fed and feeds a cpuorder membar and if its feed
1209   // membar also feeds an acquiring load.
1210   //
1211   // Finally an inlined (Unsafe) CAS operation is translated to the
1212   // following ideal graph
1213   //
1214   //   MemBarRelease
1215   //   MemBarCPUOrder
1216   //   CompareAndSwapX {CardMark}-optional
1217   //   MemBarCPUOrder
1218   //   MemBarAcquire
1219   //
1220   // So, where we can identify these volatile read and write
1221   // signatures we can choose to plant either of the above two code
1222   // sequences. For a volatile read we can simply plant a normal
1223   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1224   // also choose to inhibit translation of the MemBarAcquire and
1225   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1226   //
1227   // When we recognise a volatile store signature we can choose to
1228   // plant at a dmb ish as a translation for the MemBarRelease, a
1229   // normal str<x> and then a dmb ish for the MemBarVolatile.
1230   // Alternatively, we can inhibit translation of the MemBarRelease
1231   // and MemBarVolatile and instead plant a simple stlr<x>
1232   // instruction.
1233   //
1234   // when we recognise a CAS signature we can choose to plant a dmb
1235   // ish as a translation for the MemBarRelease, the conventional
1236   // macro-instruction sequence for the CompareAndSwap node (which
1237   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1238   // Alternatively, we can elide generation of the dmb instructions
1239   // and plant the alternative CompareAndSwap macro-instruction
1240   // sequence (which uses ldaxr<x>).
1241   //
1242   // Of course, the above only applies when we see these signature
1243   // configurations. We still want to plant dmb instructions in any
1244   // other cases where we may see a MemBarAcquire, MemBarRelease or
1245   // MemBarVolatile. For example, at the end of a constructor which
1246   // writes final/volatile fields we will see a MemBarRelease
1247   // instruction and this needs a 'dmb ish' lest we risk the
1248   // constructed object being visible without making the
1249   // final/volatile field writes visible.
1250   //
1251   // n.b. the translation rules below which rely on detection of the
1252   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1253   // If we see anything other than the signature configurations we
1254   // always just translate the loads and stores to ldr<x> and str<x>
1255   // and translate acquire, release and volatile membars to the
1256   // relevant dmb instructions.
1257   //
1258 
1259   // graph traversal helpers used for volatile put/get and CAS
1260   // optimization
1261 
1262   // 1) general purpose helpers
1263 
1264   // if node n is linked to a parent MemBarNode by an intervening
1265   // Control and Memory ProjNode return the MemBarNode otherwise return
1266   // NULL.
1267   //
1268   // n may only be a Load or a MemBar.
1269 
1270   MemBarNode *parent_membar(const Node *n)
1271   {
1272     Node *ctl = NULL;
1273     Node *mem = NULL;
1274     Node *membar = NULL;
1275 
1276     if (n->is_Load()) {
1277       ctl = n->lookup(LoadNode::Control);
1278       mem = n->lookup(LoadNode::Memory);
1279     } else if (n->is_MemBar()) {
1280       ctl = n->lookup(TypeFunc::Control);
1281       mem = n->lookup(TypeFunc::Memory);
1282     } else {
1283         return NULL;
1284     }
1285 
1286     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1287       return NULL;
1288     }
1289 
1290     membar = ctl->lookup(0);
1291 
1292     if (!membar || !membar->is_MemBar()) {
1293       return NULL;
1294     }
1295 
1296     if (mem->lookup(0) != membar) {
1297       return NULL;
1298     }
1299 
1300     return membar->as_MemBar();
1301   }
1302 
1303   // if n is linked to a child MemBarNode by intervening Control and
1304   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1305 
1306   MemBarNode *child_membar(const MemBarNode *n)
1307   {
1308     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1309     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1310 
1311     // MemBar needs to have both a Ctl and Mem projection
1312     if (! ctl || ! mem)
1313       return NULL;
1314 
1315     MemBarNode *child = NULL;
1316     Node *x;
1317 
1318     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1319       x = ctl->fast_out(i);
1320       // if we see a membar we keep hold of it. we may also see a new
1321       // arena copy of the original but it will appear later
1322       if (x->is_MemBar()) {
1323           child = x->as_MemBar();
1324           break;
1325       }
1326     }
1327 
1328     if (child == NULL) {
1329       return NULL;
1330     }
1331 
1332     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1333       x = mem->fast_out(i);
1334       // if we see a membar we keep hold of it. we may also see a new
1335       // arena copy of the original but it will appear later
1336       if (x == child) {
1337         return child;
1338       }
1339     }
1340     return NULL;
1341   }
1342 
1343   // helper predicate use to filter candidates for a leading memory
1344   // barrier
1345   //
1346   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1347   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1348 
1349   bool leading_membar(const MemBarNode *barrier)
1350   {
1351     int opcode = barrier->Opcode();
1352     // if this is a release membar we are ok
1353     if (opcode == Op_MemBarRelease) {
1354       return true;
1355     }
1356     // if its a cpuorder membar . . .
1357     if (opcode != Op_MemBarCPUOrder) {
1358       return false;
1359     }
1360     // then the parent has to be a release membar
1361     MemBarNode *parent = parent_membar(barrier);
1362     if (!parent) {
1363       return false;
1364     }
1365     opcode = parent->Opcode();
1366     return opcode == Op_MemBarRelease;
1367   }
1368 
1369   // 2) card mark detection helper
1370 
1371   // helper predicate which can be used to detect a volatile membar
1372   // introduced as part of a conditional card mark sequence either by
1373   // G1 or by CMS when UseCondCardMark is true.
1374   //
1375   // membar can be definitively determined to be part of a card mark
1376   // sequence if and only if all the following hold
1377   //
1378   // i) it is a MemBarVolatile
1379   //
1380   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1381   // true
1382   //
1383   // iii) the node's Mem projection feeds a StoreCM node.
1384 
1385   bool is_card_mark_membar(const MemBarNode *barrier)
1386   {
1387     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1388       return false;
1389     }
1390 
1391     if (barrier->Opcode() != Op_MemBarVolatile) {
1392       return false;
1393     }
1394 
1395     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1396 
1397     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1398       Node *y = mem->fast_out(i);
1399       if (y->Opcode() == Op_StoreCM) {
1400         return true;
1401       }
1402     }
1403 
1404     return false;
1405   }
1406 
1407 
1408   // 3) helper predicates to traverse volatile put or CAS graphs which
1409   // may contain GC barrier subgraphs
1410 
1411   // Preamble
1412   // --------
1413   //
1414   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1416   // leading MemBarRelease and a trailing MemBarVolatile as follows
1417   //
1418   //   MemBarRelease
1419   //  {    ||        } -- optional
1420   //  {MemBarCPUOrder}
1421   //       ||       \\
1422   //       ||     StoreX[mo_release]
1423   //       | \ Bot    / ???
1424   //       | MergeMem
1425   //       | /
1426   //   MemBarVolatile
1427   //
1428   // where
1429   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1430   //  | \ and / indicate further routing of the Ctl and Mem feeds
1431   //
1432   // Note that the memory feed from the CPUOrder membar to the
1433   // MergeMem node is an AliasIdxBot slice while the feed from the
1434   // StoreX is for a slice determined by the type of value being
1435   // written.
1436   //
1437   // the diagram above shows the graph we see for non-object stores.
1438   // for a volatile Object store (StoreN/P) we may see other nodes
1439   // below the leading membar because of the need for a GC pre- or
1440   // post-write barrier.
1441   //
  // with most GC configurations we will see this simple variant which
1443   // includes a post-write barrier card mark.
1444   //
1445   //   MemBarRelease______________________________
1446   //         ||    \\               Ctl \        \\
1447   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1448   //         | \ Bot  / oop                 . . .  /
1449   //         | MergeMem
1450   //         | /
1451   //         ||      /
1452   //   MemBarVolatile
1453   //
1454   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1455   // the object address to an int used to compute the card offset) and
1456   // Ctl+Mem to a StoreB node (which does the actual card mark).
1457   //
1458   // n.b. a StoreCM node is only ever used when CMS (with or without
1459   // CondCardMark) or G1 is configured. This abstract instruction
1460   // differs from a normal card mark write (StoreB) because it implies
1461   // a requirement to order visibility of the card mark (StoreCM)
1462   // after that of the object put (StoreP/N) using a StoreStore memory
1463   // barrier. Note that this is /not/ a requirement to order the
1464   // instructions in the generated code (that is already guaranteed by
1465   // the order of memory dependencies). Rather it is a requirement to
1466   // ensure visibility order which only applies on architectures like
1467   // AArch64 which do not implement TSO. This ordering is required for
1468   // both non-volatile and volatile puts.
1469   //
1470   // That implies that we need to translate a StoreCM using the
1471   // sequence
1472   //
1473   //   dmb ishst
1474   //   stlrb
1475   //
1476   // This dmb cannot be omitted even when the associated StoreX or
1477   // CompareAndSwapX is implemented using stlr. However, as described
1478   // below there are circumstances where a specific GC configuration
1479   // requires a stronger barrier in which case it can be omitted.
1480   // 
1481   // With the Serial or Parallel GC using +CondCardMark the card mark
1482   // is performed conditionally on it currently being unmarked in
1483   // which case the volatile put graph looks slightly different
1484   //
1485   //   MemBarRelease____________________________________________
1486   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1487   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1488   //         | \ Bot / oop                          \            |
1489   //         | MergeMem                            . . .      StoreB
1490   //         | /                                                /
1491   //         ||     /
1492   //   MemBarVolatile
1493   //
1494   // It is worth noting at this stage that all the above
1495   // configurations can be uniquely identified by checking that the
1496   // memory flow includes the following subgraph:
1497   //
1498   //   MemBarRelease
1499   //  {MemBarCPUOrder}
1500   //      |  \      . . .
1501   //      |  StoreX[mo_release]  . . .
1502   //  Bot |   / oop
1503   //     MergeMem
1504   //      |
1505   //   MemBarVolatile
1506   //
1507   // This is referred to as a *normal* volatile store subgraph. It can
1508   // easily be detected starting from any candidate MemBarRelease,
1509   // StoreX[mo_release] or MemBarVolatile node.
1510   //
1511   // A small variation on this normal case occurs for an unsafe CAS
1512   // operation. The basic memory flow subgraph for a non-object CAS is
1513   // as follows
1514   //
1515   //   MemBarRelease
1516   //         ||
1517   //   MemBarCPUOrder
1518   //          |     \\   . . .
1519   //          |     CompareAndSwapX
1520   //          |       |
1521   //      Bot |     SCMemProj
1522   //           \     / Bot
1523   //           MergeMem
1524   //           /
1525   //   MemBarCPUOrder
1526   //         ||
1527   //   MemBarAcquire
1528   //
1529   // The same basic variations on this arrangement (mutatis mutandis)
1530   // occur when a card mark is introduced. i.e. the CPUOrder MemBar
1531   // feeds the extra CastP2X, LoadB etc nodes but the above memory
1532   // flow subgraph is still present.
1533   // 
1534   // This is referred to as a *normal* CAS subgraph. It can easily be
1535   // detected starting from any candidate MemBarRelease,
1536   // StoreX[mo_release] or MemBarAcquire node.
1537   //
1538   // The code below uses two helper predicates, leading_to_trailing
1539   // and trailing_to_leading to identify these normal graphs, one
1540   // validating the layout starting from the top membar and searching
1541   // down and the other validating the layout starting from the lower
1542   // membar and searching up.
1543   //
1544   // There are two special case GC configurations when the simple
1545   // normal graphs above may not be generated: when using G1 (which
1546   // always employs a conditional card mark); and when using CMS with
1547   // conditional card marking (+CondCardMark) configured. These GCs
  // are both concurrent rather than stop-the-world GCs. So they
1549   // introduce extra Ctl+Mem flow into the graph between the leading
1550   // and trailing membar nodes, in particular enforcing stronger
  // memory serialisation between the object put and the corresponding
1552   // conditional card mark. CMS employs a post-write GC barrier while
1553   // G1 employs both a pre- and post-write GC barrier.
1554   //
1555   // The post-write barrier subgraph for these configurations includes
1556   // a MemBarVolatile node -- referred to as a card mark membar --
1557   // which is needed to order the card write (StoreCM) operation in
1558   // the barrier, the preceding StoreX (or CompareAndSwapX) and Store
1559   // operations performed by GC threads i.e. a card mark membar
1560   // constitutes a StoreLoad barrier hence must be translated to a dmb
1561   // ish (whether or not it sits inside a volatile store sequence).
1562   //
1563   // Of course, the use of the dmb ish for the card mark membar also
  // implies that the StoreCM which follows can omit the dmb ishst
1565   // instruction. The necessary visibility ordering will already be
1566   // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
  // needs to be generated as part of the StoreCM sequence with GC
1568   // configuration +CMS -CondCardMark.
1569   // 
1570   // Of course all these extra barrier nodes may well be absent --
1571   // they are only inserted for object puts. Their potential presence
1572   // significantly complicates the task of identifying whether a
1573   // MemBarRelease, StoreX[mo_release], MemBarVolatile or
1574   // MemBarAcquire forms part of a volatile put or CAS when using
1575   // these GC configurations (see below) and also complicates the
1576   // decision as to how to translate a MemBarVolatile and StoreCM.
1577   //
  // So, this means that a card mark MemBarVolatile occurring in the
  // post-barrier graph needs to be distinguished from a normal
1580   // trailing MemBarVolatile. Resolving this is straightforward: a
1581   // card mark MemBarVolatile always projects a Mem feed to a StoreCM
1582   // node and that is a unique marker
1583   //
1584   //      MemBarVolatile (card mark)
1585   //       C |    \     . . .
1586   //         |   StoreCM   . . .
1587   //       . . .
1588   //
1589   // Returning to the task of translating the object put and the
1590   // leading/trailing membar nodes: what do the node graphs look like
1591   // for these 2 special cases? and how can we determine the status of
1592   // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
1593   // normal and non-normal cases?
1594   //
1595   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1597   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1598   // intervening StoreLoad barrier (MemBarVolatile).
1599   //
1600   // So, with CMS we may see a node graph for a volatile object store
1601   // which looks like this
1602   //
1603   //   MemBarRelease
1604   //   MemBarCPUOrder_(leading)____________________
1605   //     C |  | M \       \\               M |   C \
1606   //       |  |    \    StoreN/P[mo_release] |  CastP2X
1607   //       |  | Bot \    / oop      \        |
1608   //       |  |    MergeMem          \      / 
1609   //       |  |      /                |    /
1610   //     MemBarVolatile (card mark)   |   /
1611   //     C |  ||    M |               |  /
1612   //       | LoadB    | Bot       oop | / Bot
1613   //       |   |      |              / /
1614   //       | Cmp      |\            / /
1615   //       | /        | \          / /
1616   //       If         |  \        / /
1617   //       | \        |   \      / /
1618   // IfFalse  IfTrue  |    \    / /
1619   //       \     / \  |    |   / /
1620   //        \   / StoreCM  |  / /
1621   //         \ /      \   /  / /
1622   //        Region     Phi  / /
1623   //          | \   Raw |  / /
1624   //          |  . . .  | / /
1625   //          |       MergeMem
1626   //          |           |
1627   //        MemBarVolatile (trailing)
1628   //
1629   // Notice that there are two MergeMem nodes below the leading
1630   // membar. The first MergeMem merges the AliasIdxBot Mem slice from
1631   // the leading membar and the oopptr Mem slice from the Store into
1632   // the card mark membar. The trailing MergeMem merges the
1633   // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
1634   // slice from the StoreCM and an oop slice from the StoreN/P node
1635   // into the trailing membar (n.b. the raw slice proceeds via a Phi
1636   // associated with the If region).
1637   //
1638   // So, in the case of CMS + CondCardMark the volatile object store
1639   // graph still includes a normal volatile store subgraph from the
1640   // leading membar to the trailing membar. However, it also contains
1641   // the same shape memory flow to the card mark membar. The two flows
1642   // can be distinguished by testing whether or not the downstream
1643   // membar is a card mark membar.
1644   //
1645   // The graph for a CAS also varies with CMS + CondCardMark, in
1646   // particular employing a control feed from the CompareAndSwapX node
1647   // through a CmpI and If to the card mark membar and StoreCM which
1648   // updates the associated card. This avoids executing the card mark
1649   // if the CAS fails. However, it can be seen from the diagram below
1650   // that the presence of the barrier does not alter the normal CAS
1651   // memory subgraph where the leading membar feeds a CompareAndSwapX,
1652   // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
1653   // MemBarAcquire pair.
1654   //
1655   //   MemBarRelease
1656   //   MemBarCPUOrder__(leading)_______________________
1657   //   C /  M |                        \\            C \
1658   //  . . .   | Bot                CompareAndSwapN/P   CastP2X
1659   //          |                  C /  M |
1660   //          |                 CmpI    |
1661   //          |                  /      |
1662   //          |               . . .     |
1663   //          |              IfTrue     |
1664   //          |              /          |
1665   //       MemBarVolatile (card mark)   |
1666   //        C |  ||    M |              |
1667   //          | LoadB    | Bot   ______/|
1668   //          |   |      |      /       |
1669   //          | Cmp      |     /      SCMemProj
1670   //          | /        |    /         |
1671   //          If         |   /         /
1672   //          | \        |  /         / Bot
1673   //     IfFalse  IfTrue | /         /
1674   //          |   / \   / / prec    /
1675   //   . . .  |  /  StoreCM        /
1676   //        \ | /      | raw      /
1677   //        Region    . . .      /
1678   //           | \              /
1679   //           |   . . .   \    / Bot
1680   //           |        MergeMem
1681   //           |          /
1682   //         MemBarCPUOrder
1683   //         MemBarAcquire (trailing)
1684   //
1685   // This has a slightly different memory subgraph to the one seen
1686   // previously but the core of it has a similar memory flow to the
1687   // CAS normal subgraph:
1688   //
1689   //   MemBarRelease
1690   //   MemBarCPUOrder____
1691   //         |          \      . . .
1692   //         |       CompareAndSwapX  . . .
1693   //         |       C /  M |
1694   //         |      CmpI    |
1695   //         |       /      |
1696   //         |      . .    /
1697   //     Bot |   IfTrue   /
1698   //         |   /       /
1699   //    MemBarVolatile  /
1700   //         | ...     /
1701   //      StoreCM ... /
1702   //         |       / 
1703   //       . . .  SCMemProj
1704   //      Raw \    / Bot
1705   //        MergeMem
1706   //           |
1707   //   MemBarCPUOrder
1708   //   MemBarAcquire
1709   //
1710   // The G1 graph for a volatile object put is a lot more complicated.
1711   // Nodes inserted on behalf of G1 may comprise: a pre-write graph
1712   // which adds the old value to the SATB queue; the releasing store
1713   // itself; and, finally, a post-write graph which performs a card
1714   // mark.
1715   //
1716   // The pre-write graph may be omitted, but only when the put is
1717   // writing to a newly allocated (young gen) object and then only if
1718   // there is a direct memory chain to the Initialize node for the
1719   // object allocation. This will not happen for a volatile put since
1720   // any memory chain passes through the leading membar.
1721   //
1722   // The pre-write graph includes a series of 3 If tests. The outermost
1723   // If tests whether SATB is enabled (no else case). The next If tests
1724   // whether the old value is non-NULL (no else case). The third tests
1725   // whether the SATB queue index is > 0, if so updating the queue. The
1726   // else case for this third If calls out to the runtime to allocate a
1727   // new queue buffer.
1728   //
1729   // So with G1 the pre-write and releasing store subgraph looks like
1730   // this (the nested Ifs are omitted).
1731   //
1732   //  MemBarRelease (leading)____________
1733   //     C |  ||  M \   M \    M \  M \ . . .
1734   //       | LoadB   \  LoadL  LoadN   \
1735   //       | /        \                 \
1736   //       If         |\                 \
1737   //       | \        | \                 \
1738   //  IfFalse  IfTrue |  \                 \
1739   //       |     |    |   \                 |
1740   //       |     If   |   /\                |
1741   //       |     |          \               |
1742   //       |                 \              |
1743   //       |    . . .         \             |
1744   //       | /       | /       |            |
1745   //      Region  Phi[M]       |            |
1746   //       | \       |         |            |
1747   //       |  \_____ | ___     |            |
1748   //     C | C \     |   C \ M |            |
1749   //       | CastP2X | StoreN/P[mo_release] |
1750   //       |         |         |            |
1751   //     C |       M |       M |          M |
1752   //        \        | Raw     | oop       / Bot
1753   //                  . . .
1754   //          (post write subtree elided)
1755   //                    . . .
1756   //             C \         M /
1757   //         MemBarVolatile (trailing)
1758   //
1759   // Note that the three memory feeds into the post-write tree are an
1760   // AliasRawIdx slice associated with the writes in the pre-write
1761   // tree, an oop type slice from the StoreX specific to the type of
1762   // the volatile field and the AliasBotIdx slice emanating from the
1763   // leading membar.
1764   //
1765   // n.b. the LoadB in this subgraph is not the card read -- it's a
1766   // read of the SATB queue active flag.
1767   //
1768   // The CAS graph is once again a variant of the above with a
1769   // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
1770   // value from the CompareAndSwapX node is fed into the post-write
1771   // graph along with the AliasIdxRaw feed from the pre-barrier and
1772   // the AliasIdxBot feeds from the leading membar and the ScMemProj.
1773   //
1774   //  MemBarRelease (leading)____________
1775   //     C |  ||  M \   M \    M \  M \ . . .
1776   //       | LoadB   \  LoadL  LoadN   \
1777   //       | /        \                 \
1778   //       If         |\                 \
1779   //       | \        | \                 \
1780   //  IfFalse  IfTrue |  \                 \
1781   //       |     |    |   \                 \
1782   //       |     If   |    \                 |
1783   //       |     |          \                |
1784   //       |                 \               |
1785   //       |    . . .         \              |
1786   //       | /       | /       \             |
1787   //      Region  Phi[M]        \            |
1788   //       | \       |           \           |
1789   //       |  \_____ |            |          |
1790   //     C | C \     |            |          |
1791   //       | CastP2X |     CompareAndSwapX   |
1792   //       |         |   res |     |         |
1793   //     C |       M |       |  SCMemProj  M |
1794   //        \        | Raw   |     | Bot    / Bot
1795   //                  . . .
1796   //          (post write subtree elided)
1797   //                    . . .
1798   //             C \         M /
1799   //         MemBarVolatile (trailing)
1800   //
1801   // The G1 post-write subtree is also optional, this time when the
1802   // new value being written is either null or can be identified as a
1803   // newly allocated (young gen) object with no intervening control
1804   // flow. The latter cannot happen but the former may, in which case
1805   // the card mark membar is omitted and the memory feeds from the
1806   // leading membar and the StoreN/P are merged directly into the
1807   // trailing membar as per the normal subgraph. So, the only special
1808   // case which arises is when the post-write subgraph is generated.
1809   //
1810   // The kernel of the post-write G1 subgraph is the card mark itself
1811   // which includes a card mark memory barrier (MemBarVolatile), a
1812   // card test (LoadB), and a conditional update (If feeding a
1813   // StoreCM). These nodes are surrounded by a series of nested Ifs
1814   // which try to avoid doing the card mark. The top level If skips if
1815   // the object reference does not cross regions (i.e. it tests if
1816   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1817   // need not be recorded. The next If, which skips on a NULL value,
1818   // may be absent (it is not generated if the type of value is >=
1819   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1820   // checking if card_val != young).  n.b. although this test requires
1821   // a pre-read of the card it can safely be done before the StoreLoad
1822   // barrier. However that does not bypass the need to reread the card
1823   // after the barrier.
1824   //
1825   //                (pre-write subtree elided)
1826   //        . . .                  . . .    . . .  . . .
1827   //        C |               M |    M |    M |
1828   //       Region            Phi[M] StoreN    |
1829   //          |            Raw  |  oop |  Bot |
1830   //         / \_______         |\     |\     |\
1831   //      C / C \      . . .    | \    | \    | \
1832   //       If   CastP2X . . .   |  \   |  \   |  \
1833   //       / \                  |   \  |   \  |   \
1834   //      /   \                 |    \ |    \ |    \
1835   // IfFalse IfTrue             |      |      |     \
1836   //   |       |                 \     |     /       |
1837   //   |       If                 \    | \  /   \    |
1838   //   |      / \                  \   |   /     \   |
1839   //   |     /   \                  \  |  / \     |  |
1840   //   | IfFalse IfTrue           MergeMem   \    |  |
1841   //   |  . . .    / \                 |      \   |  |
1842   //   |          /   \                |       |  |  |
1843   //   |     IfFalse IfTrue            |       |  |  |
1844   //   |      . . .    |               |       |  |  |
1845   //   |               If             /        |  |  |
1846   //   |               / \           /         |  |  |
1847   //   |              /   \         /          |  |  |
1848   //   |         IfFalse IfTrue    /           |  |  |
1849   //   |           . . .   |      /            |  |  |
1850   //   |                    \    /             |  |  |
1851   //   |                     \  /              |  |  |
1852   //   |         MemBarVolatile__(card mark  ) |  |  |
1853   //   |              ||   C |     \           |  |  |
1854   //   |             LoadB   If     |         /   |  |
1855   //   |                    / \ Raw |        /   /  /
1856   //   |                   . . .    |       /   /  /
1857   //   |                        \   |      /   /  /
1858   //   |                        StoreCM   /   /  /
1859   //   |                           |     /   /  /
1860   //   |                            . . .   /  /
1861   //   |                                   /  /
1862   //   |   . . .                          /  /
1863   //   |    |             | /            /  /
1864   //   |    |           Phi[M] /        /  /
1865   //   |    |             |   /        /  /
1866   //   |    |             |  /        /  /
1867   //   |  Region  . . .  Phi[M]      /  /
1868   //   |    |             |         /  /
1869   //    \   |             |        /  /
1870   //     \  | . . .       |       /  /
1871   //      \ |             |      /  /
1872   //      Region         Phi[M] /  /
1873   //        |               \  /  /
1874   //         \             MergeMem
1875   //          \            /
1876   //          MemBarVolatile
1877   //
1878   // As with CMS + CondCardMark the first MergeMem merges the
1879   // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
1880   // slice from the Store into the card mark membar. However, in this
1881   // case it may also merge an AliasRawIdx mem slice from the pre
1882   // barrier write.
1883   //
1884   // The trailing MergeMem merges an AliasIdxBot Mem slice from the
1885   // leading membar with an oop slice from the StoreN and an
1886   // AliasRawIdx slice from the post barrier writes. In this case the
1887   // AliasIdxRaw Mem slice is merged through a series of Phi nodes
1888   // which combine feeds from the If regions in the post barrier
1889   // subgraph.
1890   //
1891   // So, for G1 the same characteristic subgraph arises as for CMS +
1892   // CondCardMark. There is a normal subgraph feeding the card mark
1893   // membar and a normal subgraph feeding the trailing membar.
1894   //
1895   // The CAS graph when using G1GC also includes an optional
1896   // post-write subgraph. It is very similar to the above graph except
1897   // for a few details.
1898   // 
1899   // - The control flow is gated by an additional If which tests the
1900   // result from the CompareAndSwapX node
1901   // 
1902   //  - The MergeMem which feeds the card mark membar only merges the
1903   // AliasIdxBot slice from the leading membar and the AliasIdxRaw
1904   // slice from the pre-barrier. It does not merge the SCMemProj
1905   // AliasIdxBot slice. So, this subgraph does not look like the
1906   // normal CAS subgraph.
1907   //
1908   // - The MergeMem which feeds the trailing membar merges the
1909   // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
1910   // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
1911   // has two AliasIdxBot input slices. However, this subgraph does
1912   // still look like the normal CAS subgraph.
1913   //
1914   // So, the upshot is:
1915   //
1916   // In all cases a volatile put graph will include a *normal*
1917   // volatile store subgraph between the leading membar and the
1918   // trailing membar. It may also include a normal volatile store
1919   // subgraph between the leading membar and the card mark membar.
1920   //
1921   // In all cases a CAS graph will contain a unique normal CAS graph
1922   // feeding the trailing membar.
1923   //
1924   // In all cases where there is a card mark membar (either as part of
1925   // a volatile object put or CAS) it will be fed by a MergeMem whose
1926   // AliasIdxBot slice feed will be a leading membar.
1927   //
1928   // The predicates controlling generation of instructions for store
1929   // and barrier nodes employ a few simple helper functions (described
1930   // below) which identify the presence or absence of all these
1931   // subgraph configurations and provide a means of traversing from
1932   // one node in the subgraph to another.
1933 
1934   // is_CAS(int opcode)
1935   //
1936   // return true if opcode is one of the possible CompareAndSwapX
1937   // values otherwise false.
1938 
1939   bool is_CAS(int opcode)
1940   {
1941     return (opcode == Op_CompareAndSwapI ||
1942             opcode == Op_CompareAndSwapL ||
1943             opcode == Op_CompareAndSwapN ||
1944             opcode == Op_CompareAndSwapP);
1945   }
1946 
1947   // leading_to_trailing
1948   //
1949   // graph traversal helper which detects the normal case Mem feed from
1950   // a release membar (or, optionally, its cpuorder child) to a
1951   // dependent volatile membar i.e. it ensures that one or other of
1952   // the following Mem flow subgraph is present.
1953   //
1954   //   MemBarRelease {leading}
1955   //   {MemBarCPUOrder} {optional}
1956   //     Bot |  \      . . .
1957   //         |  StoreN/P[mo_release]  . . .
1958   //         |   /
1959   //        MergeMem
1960   //         |
1961   //   MemBarVolatile {not card mark}
1962   //
1963   //   MemBarRelease {leading}
1964   //   {MemBarCPUOrder} {optional}
1965   //      |       \      . . .
1966   //      |     CompareAndSwapX  . . .
1967   //               |
1968   //     . . .    SCMemProj
1969   //           \   |
1970   //      |    MergeMem
1971   //      |       /
1972   //    MemBarCPUOrder
1973   //    MemBarAcquire {trailing}
1974   //
1975   // the predicate needs to be capable of distinguishing the following
1976   // volatile put graph which may arise when a GC post barrier
1977   // inserts a card mark membar
1978   //
1979   //   MemBarRelease {leading}
1980   //   {MemBarCPUOrder}__
1981   //     Bot |   \       \
1982   //         |   StoreN/P \
1983   //         |    / \     |
1984   //        MergeMem \    |
1985   //         |        \   |
1986   //   MemBarVolatile  \  |
1987   //    {card mark}     \ |
1988   //                  MergeMem
1989   //                      |
1990   // {not card mark} MemBarVolatile
1991   //
1992   // if the correct configuration is present returns the trailing
1993   // membar otherwise NULL.
1994   //
1995   // the input membar is expected to be either a cpuorder membar or a
1996   // release membar. in the latter case it should not have a cpu membar
1997   // child.
1998   //
1999   // the returned value may be a card mark or trailing membar
2000   //
2001 
2002   MemBarNode *leading_to_trailing(MemBarNode *leading)
2003   {
2004     assert((leading->Opcode() == Op_MemBarRelease ||
2005             leading->Opcode() == Op_MemBarCPUOrder),
2006            "expecting a volatile or cpuroder membar!");
2007 
2008     // check the mem flow
2009     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2010 
2011     if (!mem) {
2012       return NULL;
2013     }
2014 
         // scan the users of the leading membar's Mem projection,
         // collecting at most two MergeMems plus a unique releasing
         // store or CAS -- any other combination is rejected
2015     Node *x = NULL;
2016     StoreNode * st = NULL;
2017     LoadStoreNode *cas = NULL;
2018     MergeMemNode *mm = NULL;
2019     MergeMemNode *mm2 = NULL;
2020 
2021     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2022       x = mem->fast_out(i);
2023       if (x->is_MergeMem()) {
2024         if (mm != NULL) {
2025           if (mm2 != NULL) {
2026             // should not see more than 2 merge mems
2027             return NULL;
2028           } else {
2029             mm2 = x->as_MergeMem();
2030           }
2031         } else {
2032           mm = x->as_MergeMem();
2033         }
2034       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2035         // two releasing stores/CAS nodes is one too many
2036         if (st != NULL || cas != NULL) {
2037           return NULL;
2038         }
2039         st = x->as_Store();
2040       } else if (is_CAS(x->Opcode())) {
2041         if (st != NULL || cas != NULL) {
2042           return NULL;
2043         }
2044         cas = x->as_LoadStore();
2045       }
2046     }
2047 
2048     // must have a store or a cas
2049     if (!st && !cas) {
2050       return NULL;
2051     }
2052 
2053     // must have at least one merge if we also have st
2054     if (st && !mm) {
2055       return NULL;
2056     }
2057 
         // for a CAS the expected trailing path is CAS -> SCMemProj ->
         // MergeMem -> MemBarCPUOrder -> MemBarAcquire; for a store it
         // is StoreX -> MergeMem -> MemBarVolatile (with a second
         // merge/membar pair when a card mark membar is present)
2058     if (cas) {
2059       Node *y = NULL;
2060       // look for an SCMemProj
2061       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
2062         x = cas->fast_out(i);
2063         if (x->is_Proj()) {
2064           y = x;
2065           break;
2066         }
2067       }
2068       if (y == NULL) {
2069         return NULL;
2070       }
2071       // the proj must feed a MergeMem
2072       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
2073         x = y->fast_out(i);
2074         if (x->is_MergeMem()) {
2075           mm = x->as_MergeMem();
2076           break;
2077         }
2078       }
2079       if (mm == NULL) {
2080         return NULL;
2081       }
2082       MemBarNode *mbar = NULL;
2083       // ensure the merge feeds a trailing membar cpuorder + acquire pair
2084       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2085         x = mm->fast_out(i);
2086         if (x->is_MemBar()) {
2087           int opcode = x->Opcode();
2088           if (opcode == Op_MemBarCPUOrder) {
2089             MemBarNode *z =  x->as_MemBar();
2090             z = child_membar(z);
2091             if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
2092               mbar = z;
2093             }
2094           }
           // n.b. only the first membar user of the merge is examined
2095           break;
2096         }
2097       }
2098       return mbar;
2099     } else {
2100       Node *y = NULL;
2101       // ensure the store feeds the first mergemem;
2102       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2103         if (st->fast_out(i) == mm) {
2104           y = st;
2105           break;
2106         }
2107       }
2108       if (y == NULL) {
2109         return NULL;
2110       }
2111       if (mm2 != NULL) {
2112         // ensure the store feeds the second mergemem;
2113         y = NULL;
2114         for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2115           if (st->fast_out(i) == mm2) {
2116             y = st;
2117           }
2118         }
2119         if (y == NULL) {
2120           return NULL;
2121         }
2122       }
2123 
2124       MemBarNode *mbar = NULL;
2125       // ensure the first mergemem feeds a volatile membar
2126       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2127         x = mm->fast_out(i);
2128         if (x->is_MemBar()) {
2129           int opcode = x->Opcode();
2130           if (opcode == Op_MemBarVolatile) {
2131             mbar = x->as_MemBar();
2132           }
2133           break;
2134         }
2135       }
2136       if (mm2 == NULL) {
2137         // this is our only option for a trailing membar
2138         return mbar;
2139       }
2140       // ensure the second mergemem feeds a volatile membar
2141       MemBarNode *mbar2 = NULL;
2142       for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
2143         x = mm2->fast_out(i);
2144         if (x->is_MemBar()) {
2145           int opcode = x->Opcode();
2146           if (opcode == Op_MemBarVolatile) {
2147             mbar2 = x->as_MemBar();
2148           }
2149           break;
2150         }
2151       }
2152       // if we have two merge mems we must have two volatile membars
2153       if (mbar == NULL || mbar2 == NULL) {
2154         return NULL;
2155       }
2156       // return the trailing membar
       // -- whichever of the two volatile membars is not the card mark
2157       if (is_card_mark_membar(mbar2)) {
2158         return mbar;
2159       } else {
2160         if (is_card_mark_membar(mbar)) {
2161           return mbar2;
2162         } else {
2163           return NULL;
2164         }
2165       }
2166     }
2167   }
2168 
2169   // trailing_to_leading
2170   //
2171   // graph traversal helper which detects the normal case Mem feed
2172   // from a trailing membar to a preceding release membar (optionally
2173   // its cpuorder child) i.e. it ensures that one or other of the
2174   // following Mem flow subgraphs is present.
2175   //
2176   //   MemBarRelease {leading}
2177   //   MemBarCPUOrder {optional}
2178   //    | Bot |  \      . . .
2179   //    |     |  StoreN/P[mo_release]  . . .
2180   //    |     |   /
2181   //    |    MergeMem
2182   //    |     |
2183   //   MemBarVolatile {not card mark}
2184   //
2185   //   MemBarRelease {leading}
2186   //   MemBarCPUOrder {optional}
2187   //      |       \      . . .
2188   //      |     CompareAndSwapX  . . .
2189   //               |
2190   //     . . .    SCMemProj
2191   //           \   |
2192   //      |    MergeMem
2193   //      |       |
2194   //    MemBarCPUOrder
2195   //    MemBarAcquire {trailing}
2196   //
2197   // this predicate checks for the same flow as the previous predicate
2198   // but starting from the bottom rather than the top.
2199   //
2200   // if the configuration is present returns the cpuorder membar for
2201   // preference or when absent the release membar otherwise NULL.
2202   //
2203   // n.b. the input membar is expected to be a MemBarVolatile or
2204   // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
2205   // mark membar.
2206 
2207   MemBarNode *trailing_to_leading(const MemBarNode *barrier)
2208   {
2209     // input must be a volatile membar
2210     assert((barrier->Opcode() == Op_MemBarVolatile ||
2211             barrier->Opcode() == Op_MemBarAcquire),
2212            "expecting a volatile or an acquire membar");
2213 
2214     assert((barrier->Opcode() != Op_MemBarVolatile) ||
2215            !is_card_mark_membar(barrier),
2216            "not expecting a card mark membar");
2217     Node *x;
2218     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2219 
2220     // if we have an acquire membar then it must be fed via a CPUOrder
2221     // membar
2222 
2223     if (is_cas) {
2224       // skip to parent barrier which must be a cpuorder
2225       x = parent_membar(barrier);
2226       if (x->Opcode() != Op_MemBarCPUOrder)
2227         return NULL;
2228     } else {
2229       // start from the supplied barrier
2230       x = (Node *)barrier;
2231     }
2232 
2233     // the Mem feed to the membar should be a merge
2234     x = x ->in(TypeFunc::Memory);
2235     if (!x->is_MergeMem())
2236       return NULL;
2237 
2238     MergeMemNode *mm = x->as_MergeMem();
2239 
2240     if (is_cas) {
2241       // the merge should be fed from the CAS via an SCMemProj node
2242       x = NULL;
2243       for (uint idx = 1; idx < mm->req(); idx++) {
2244         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2245           x = mm->in(idx);
2246           break;
2247         }
2248       }
2249       if (x == NULL) {
2250         return NULL;
2251       }
2252       // check for a CAS feeding this proj
2253       x = x->in(0);
2254       int opcode = x->Opcode();
2255       if (!is_CAS(opcode)) {
2256         return NULL;
2257       }
2258       // the CAS should get its mem feed from the leading membar
2259       x = x->in(MemNode::Memory);
2260     } else {
2261       // the merge should get its Bottom mem feed from the leading membar
2262       x = mm->in(Compile::AliasIdxBot);
2263     }
2264 
2265     // ensure this is a non control projection
2266     if (!x->is_Proj() || x->is_CFG()) {
2267       return NULL;
2268     }
2269     // if it is fed by a membar that's the one we want
2270     x = x->in(0);
2271 
2272     if (!x->is_MemBar()) {
2273       return NULL;
2274     }
2275 
2276     MemBarNode *leading = x->as_MemBar();
2277     // reject invalid candidates
2278     if (!leading_membar(leading)) {
2279       return NULL;
2280     }
2281 
2282     // ok, we have a leading membar, now for the sanity clauses
2283 
2284     // the leading membar must feed Mem to a releasing store or CAS
2285     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2286     StoreNode *st = NULL;
2287     LoadStoreNode *cas = NULL;
2288     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2289       x = mem->fast_out(i);
2290       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2291         // two stores or CASes is one too many
2292         if (st != NULL || cas != NULL) {
2293           return NULL;
2294         }
2295         st = x->as_Store();
2296       } else if (is_CAS(x->Opcode())) {
2297         if (st != NULL || cas != NULL) {
2298           return NULL;
2299         }
2300         cas = x->as_LoadStore();
2301       }
2302     }
2303 
2304     // we should not have both a store and a cas
2305     if (st == NULL & cas == NULL) {
2306       return NULL;
2307     }
2308 
2309     if (st == NULL) {
2310       // nothing more to check
2311       return leading;
2312     } else {
2313       // we should not have a store if we started from an acquire
2314       if (is_cas) {
2315         return NULL;
2316       }
2317 
2318       // the store should feed the merge we used to get here
2319       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2320         if (st->fast_out(i) == mm) {
2321           return leading;
2322         }
2323       }
2324     }
2325 
2326     return NULL;
2327   }
2328 
2329   // card_mark_to_leading
2330   //
2331   // graph traversal helper which traverses from a card mark volatile
2332   // membar to a leading membar i.e. it ensures that the following Mem
2333   // flow subgraph is present.
2334   //
2335   //    MemBarRelease {leading}
2336   //   {MemBarCPUOrder} {optional}
2337   //         |   . . .
2338   //     Bot |   /
2339   //      MergeMem
2340   //         |
2341   //     MemBarVolatile (card mark)
2342   //        |     \
2343   //      . . .   StoreCM
2344   //
2345   // if the configuration is present returns the cpuorder membar for
2346   // preference or when absent the release membar otherwise NULL.
2347   //
2348   // n.b. the input membar is expected to be a MemBarVolatile and must
2349   // be a card mark membar.
2350 
2351   MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
2352   {
2353     // input must be a card mark volatile membar
2354     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2355 
2356     // the Mem feed to the membar should be a merge
2357     Node *x = barrier->in(TypeFunc::Memory);
2358     if (!x->is_MergeMem()) {
2359       return NULL;
2360     }
2361 
2362     MergeMemNode *mm = x->as_MergeMem();
2363 
2364     x = mm->in(Compile::AliasIdxBot);
2365 
2366     if (!x->is_MemBar()) {
2367       return NULL;
2368     }
2369 
2370     MemBarNode *leading = x->as_MemBar();
2371 
2372     if (leading_membar(leading)) {
2373       return leading;
2374     }
2375 
2376     return NULL;
2377   }
2378 
2379 bool unnecessary_acquire(const Node *barrier)
2380 {
       // returns true when the acquire membar is redundant i.e. when
       // ordering is already guaranteed either by an acquiring load
       // feeding the membar (via the bytecode or unsafe volatile get
       // shapes described below) or because this membar is the
       // trailing node of a CAS release/acquire pairing
2381   assert(barrier->is_MemBar(), "expecting a membar");
2382 
2383   if (UseBarriersForVolatile) {
2384     // we need to plant a dmb
2385     return false;
2386   }
2387 
2388   // a volatile read derived from bytecode (or also from an inlined
2389   // SHA field read via LibraryCallKit::load_field_from_object)
2390   // manifests as a LoadX[mo_acquire] followed by an acquire membar
2391   // with a bogus read dependency on its preceding load. so in those
2392   // cases we will find the load node at the PARMS offset of the
2393   // acquire membar.  n.b. there may be an intervening DecodeN node.
2394   //
2395   // a volatile load derived from an inlined unsafe field access
2396   // manifests as a cpuorder membar with Ctl and Mem projections
2397   // feeding both an acquire membar and a LoadX[mo_acquire]. The
2398   // acquire then feeds another cpuorder membar via Ctl and Mem
2399   // projections. The load has no output dependency on these trailing
2400   // membars because subsequent nodes inserted into the graph take
2401   // their control feed from the final membar cpuorder meaning they
2402   // are all ordered after the load.
2403 
2404   Node *x = barrier->lookup(TypeFunc::Parms);
2405   if (x) {
2406     // we are starting from an acquire and it has a fake dependency
2407     //
2408     // need to check for
2409     //
2410     //   LoadX[mo_acquire]
2411     //   {  |1   }
2412     //   {DecodeN}
2413     //      |Parms
2414     //   MemBarAcquire*
2415     //
2416     // where * tags node we were passed
2417     // and |k means input k
2418     if (x->is_DecodeNarrowPtr()) {
2419       x = x->in(1);
2420     }
2421 
2422     return (x->is_Load() && x->as_Load()->is_acquire());
2423   }
2424 
2425   // now check for an unsafe volatile get
2426 
2427   // need to check for
2428   //
2429   //   MemBarCPUOrder
2430   //        ||       \\
2431   //   MemBarAcquire* LoadX[mo_acquire]
2432   //        ||
2433   //   MemBarCPUOrder
2434   //
2435   // where * tags node we were passed
2436   // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes
2437 
2438   // check for a parent MemBarCPUOrder
2439   ProjNode *ctl;
2440   ProjNode *mem;
2441   MemBarNode *parent = parent_membar(barrier);
2442   if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
2443     return false;
2444   ctl = parent->proj_out(TypeFunc::Control);
2445   mem = parent->proj_out(TypeFunc::Memory);
2446   if (!ctl || !mem) {
2447     return false;
2448   }
2449   // ensure the proj nodes both feed a LoadX[mo_acquire]
2450   LoadNode *ld = NULL;
2451   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
2452     x = ctl->fast_out(i);
2453     // if we see a load we keep hold of it and stop searching
2454     if (x->is_Load()) {
2455       ld = x->as_Load();
2456       break;
2457     }
2458   }
2459   // it must be an acquiring load
2460   if (ld && ld->is_acquire()) {
2461 
       // the same load must also hang off the parent's Mem projection
2462     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2463       x = mem->fast_out(i);
2464       // if we see the same load we drop it and stop searching
2465       if (x == ld) {
2466         ld = NULL;
2467         break;
2468       }
2469     }
2470     // we must have dropped the load
2471     if (ld == NULL) {
2472       // check for a child cpuorder membar
2473       MemBarNode *child  = child_membar(barrier->as_MemBar());
2474       if (child && child->Opcode() == Op_MemBarCPUOrder)
2475         return true;
2476     }
2477   }
2478 
2479   // final option for unnecessary membar is that it is a trailing node
2480   // belonging to a CAS
2481 
2482   MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
2483 
2484   return leading != NULL;
2485 }
2486 
2487 bool needs_acquiring_load(const Node *n)
2488 {
2489   assert(n->is_Load(), "expecting a load");
2490   if (UseBarriersForVolatile) {
2491     // we use a normal load and a dmb
2492     return false;
2493   }
2494 
2495   LoadNode *ld = n->as_Load();
2496 
2497   if (!ld->is_acquire()) {
2498     return false;
2499   }
2500 
2501   // check if this load is feeding an acquire membar
2502   //
2503   //   LoadX[mo_acquire]
2504   //   {  |1   }
2505   //   {DecodeN}
2506   //      |Parms
2507   //   MemBarAcquire*
2508   //
2509   // where * tags node we were passed
2510   // and |k means input k
2511 
2512   Node *start = ld;
2513   Node *mbacq = NULL;
2514 
2515   // if we hit a DecodeNarrowPtr we reset the start node and restart
2516   // the search through the outputs
2517  restart:
2518 
2519   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2520     Node *x = start->fast_out(i);
2521     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2522       mbacq = x;
2523     } else if (!mbacq &&
2524                (x->is_DecodeNarrowPtr() ||
2525                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2526       start = x;
2527       goto restart;
2528     }
2529   }
2530 
2531   if (mbacq) {
2532     return true;
2533   }
2534 
2535   // now check for an unsafe volatile get
2536 
2537   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2538   //
2539   //     MemBarCPUOrder
2540   //        ||       \\
2541   //   MemBarAcquire* LoadX[mo_acquire]
2542   //        ||
2543   //   MemBarCPUOrder
2544 
2545   MemBarNode *membar;
2546 
2547   membar = parent_membar(ld);
2548 
2549   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2550     return false;
2551   }
2552 
2553   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2554 
2555   membar = child_membar(membar);
2556 
2557   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2558     return false;
2559   }
2560 
2561   membar = child_membar(membar);
2562 
2563   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2564     return false;
2565   }
2566 
2567   return true;
2568 }
2569 
2570 bool unnecessary_release(const Node *n)
2571 {
2572   assert((n->is_MemBar() &&
2573           n->Opcode() == Op_MemBarRelease),
2574          "expecting a release membar");
2575 
2576   if (UseBarriersForVolatile) {
2577     // we need to plant a dmb
2578     return false;
2579   }
2580 
2581   // if there is a dependent CPUOrder barrier then use that as the
2582   // leading
2583 
2584   MemBarNode *barrier = n->as_MemBar();
2585   // check for an intervening cpuorder membar
2586   MemBarNode *b = child_membar(barrier);
2587   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2588     // ok, so start the check from the dependent cpuorder barrier
2589     barrier = b;
2590   }
2591 
2592   // must start with a normal feed
2593   MemBarNode *trailing = leading_to_trailing(barrier);
2594 
2595   return (trailing != NULL);
2596 }
2597 
2598 bool unnecessary_volatile(const Node *n)
2599 {
2600   // assert n->is_MemBar();
2601   if (UseBarriersForVolatile) {
2602     // we need to plant a dmb
2603     return false;
2604   }
2605 
2606   MemBarNode *mbvol = n->as_MemBar();
2607 
2608   // first we check if this is part of a card mark. if so then we have
2609   // to generate a StoreLoad barrier
2610 
2611   if (is_card_mark_membar(mbvol)) {
2612       return false;
2613   }
2614 
2615   // ok, if it's not a card mark then we still need to check if it is
2616   // a trailing membar of a volatile put graph.
2617 
2618   return (trailing_to_leading(mbvol) != NULL);
2619 }
2620 
2621 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2622 
2623 bool needs_releasing_store(const Node *n)
2624 {
2625   // assert n->is_Store();
2626   if (UseBarriersForVolatile) {
2627     // we use a normal store and dmb combination
2628     return false;
2629   }
2630 
2631   StoreNode *st = n->as_Store();
2632 
2633   // the store must be marked as releasing
2634   if (!st->is_release()) {
2635     return false;
2636   }
2637 
2638   // the store must be fed by a membar
2639 
2640   Node *x = st->lookup(StoreNode::Memory);
2641 
2642   if (! x || !x->is_Proj()) {
2643     return false;
2644   }
2645 
2646   ProjNode *proj = x->as_Proj();
2647 
2648   x = proj->lookup(0);
2649 
2650   if (!x || !x->is_MemBar()) {
2651     return false;
2652   }
2653 
2654   MemBarNode *barrier = x->as_MemBar();
2655 
2656   // if the barrier is a release membar or a cpuorder mmebar fed by a
2657   // release membar then we need to check whether that forms part of a
2658   // volatile put graph.
2659 
2660   // reject invalid candidates
2661   if (!leading_membar(barrier)) {
2662     return false;
2663   }
2664 
2665   // does this lead a normal subgraph?
2666   MemBarNode *trailing = leading_to_trailing(barrier);
2667 
2668   return (trailing != NULL);
2669 }
2670 
2671 // predicate controlling translation of CAS
2672 //
2673 // returns true if CAS needs to use an acquiring load otherwise false
2674 
bool needs_acquiring_load_exclusive(const Node *n)
{
  // returns true unless volatile accesses are implemented with plain
  // barriers; the ASSERT block below merely validates the expected
  // CAS graph shape
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_trailing(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2719 
2720 // predicate controlling translation of StoreCM
2721 //
2722 // returns true if a StoreStore must precede the card write otherwise
2723 // false
2724 
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking. Any other occurrence will happen when
  // performing a card mark using CMS with conditional card marking or
  // G1. In those cases the preceding MemBarVolatile will be
  // translated to a dmb ish which guarantees visibility of the
  // preceding StoreN/P before this StoreCM

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then we must
  // insert the dmb ishst

  if (UseBarriersForVolatile) {
    return false;
  }

  // we must be using CMS with conditional card marking so we have to
  // generate the StoreStore

  return false;
}
2753 
2754 
2755 #define __ _masm.
2756 
2757 // advance declarations for helper functions to convert register
2758 // indices to register objects
2759 
2760 // the ad file has to provide implementations of certain methods
2761 // expected by the generic code
2762 //
2763 // REQUIRED FUNCTIONALITY
2764 
2765 //=============================================================================
2766 
2767 // !!!!! Special hack to get all types of calls to specify the byte offset
2768 //       from the start of the call to the point where the return address
2769 //       will point.
2770 
2771 int MachCallStaticJavaNode::ret_addr_offset()
2772 {
2773   // call should be a simple bl
2774   int off = 4;
2775   return off;
2776 }
2777 
2778 int MachCallDynamicJavaNode::ret_addr_offset()
2779 {
2780   return 16; // movz, movk, movk, bl
2781 }
2782 
2783 int MachCallRuntimeNode::ret_addr_offset() {
2784   // for generated stubs the call will be
2785   //   far_call(addr)
2786   // for real runtime callouts it will be six instructions
2787   // see aarch64_enc_java_to_runtime
2788   //   adr(rscratch2, retaddr)
2789   //   lea(rscratch1, RuntimeAddress(addr)
2790   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2791   //   blrt rscratch1
2792   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2793   if (cb) {
2794     return MacroAssembler::far_branch_size();
2795   } else {
2796     return 6 * NativeInstruction::instruction_size;
2797   }
2798 }
2799 
2800 // Indicate if the safepoint node needs the polling page as an input
2801 
2802 // the shared code plants the oop data at the start of the generated
2803 // code for the safepoint node and that needs ot be at the load
2804 // instruction itself. so we cannot plant a mov of the safepoint poll
2805 // address followed by a load. setting this to true means the mov is
2806 // scheduled as a prior instruction. that's better for scheduling
2807 // anyway.
2808 
2809 bool SafePointNode::needs_polling_address_input()
2810 {
2811   return true;
2812 }
2813 
2814 //=============================================================================
2815 
2816 #ifndef PRODUCT
2817 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2818   st->print("BREAKPOINT");
2819 }
2820 #endif
2821 
2822 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2823   MacroAssembler _masm(&cbuf);
2824   __ brk(0);
2825 }
2826 
2827 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
2828   return MachNode::size(ra_);
2829 }
2830 
2831 //=============================================================================
2832 
2833 #ifndef PRODUCT
2834   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2835     st->print("nop \t# %d bytes pad for loops and calls", _count);
2836   }
2837 #endif
2838 
2839   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2840     MacroAssembler _masm(&cbuf);
2841     for (int i = 0; i < _count; i++) {
2842       __ nop();
2843     }
2844   }
2845 
2846   uint MachNopNode::size(PhaseRegAlloc*) const {
2847     return _count * NativeInstruction::instruction_size;
2848   }
2849 
2850 //=============================================================================
2851 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2852 
2853 int Compile::ConstantTable::calculate_table_base_offset() const {
2854   return 0;  // absolute addressing, no offset
2855 }
2856 
2857 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
2858 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
2859   ShouldNotReachHere();
2860 }
2861 
2862 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
2863   // Empty encoding
2864 }
2865 
2866 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
2867   return 0;
2868 }
2869 
2870 #ifndef PRODUCT
2871 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
2872   st->print("-- \t// MachConstantBaseNode (empty encoding)");
2873 }
2874 #endif
2875 
2876 #ifndef PRODUCT
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // Debug listing of the prolog, mirroring the frame-build code layout
  // chosen for the given frame size (see MachPrologNode::emit).
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames are claimed with one immediate sub; larger frames
  // need the size materialized into rscratch1 first
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
2896 #endif
2897 
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // Emit the method prolog: patchable nop, optional stack bang, frame
  // build and optional simulator notification.
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record that the frame is fully constructed at this code offset
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2933 
2934 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
2935 {
2936   return MachNode::size(ra_); // too many variables; just compute it
2937                               // the hard way
2938 }
2939 
2940 int MachPrologNode::reloc() const
2941 {
2942   return 0;
2943 }
2944 
2945 //=============================================================================
2946 
2947 #ifndef PRODUCT
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // Debug listing of the epilog: frame teardown plus, when required,
  // the return-site polling page touch.
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // three teardown shapes depending on frame size (mirrors emit)
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
2971 #endif
2972 
2973 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2974   Compile* C = ra_->C;
2975   MacroAssembler _masm(&cbuf);
2976   int framesize = C->frame_slots() << LogBytesPerInt;
2977 
2978   __ remove_frame(framesize);
2979 
2980   if (NotifySimulator) {
2981     __ notify(Assembler::method_reentry);
2982   }
2983 
2984   if (do_polling() && C->is_method_compilation()) {
2985     __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
2986   }
2987 }
2988 
2989 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
2990   // Variable size. Determine dynamically.
2991   return MachNode::size(ra_);
2992 }
2993 
2994 int MachEpilogNode::reloc() const {
2995   // Return number of relocatable values contained in this instruction.
2996   return 1; // 1 for polling page.
2997 }
2998 
2999 const Pipeline * MachEpilogNode::pipeline() const {
3000   return MachNode::pipeline_class();
3001 }
3002 
3003 // This method seems to be obsolete. It is declared in machnode.hpp
3004 // and defined in all *.ad files, but it is never called. Should we
3005 // get rid of it?
3006 int MachEpilogNode::safepoint_offset() const {
3007   assert(do_polling(), "no return for this epilog node");
3008   return 4;
3009 }
3010 
3011 //=============================================================================
3012 
3013 // Figure out which register class each belongs in: rc_int, rc_float or
3014 // rc_stack.
3015 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3016 
3017 static enum RC rc_class(OptoReg::Name reg) {
3018 
3019   if (reg == OptoReg::Bad) {
3020     return rc_bad;
3021   }
3022 
3023   // we have 30 int registers * 2 halves
3024   // (rscratch1 and rscratch2 are omitted)
3025 
3026   if (reg < 60) {
3027     return rc_int;
3028   }
3029 
3030   // we have 32 float register * 2 halves
3031   if (reg < 60 + 128) {
3032     return rc_float;
3033   }
3034 
3035   // Between float regs & stack is the flags regs.
3036   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3037 
3038   return rc_stack;
3039 }
3040 
3041 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3042   Compile* C = ra_->C;
3043 
3044   // Get registers to move.
3045   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3046   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3047   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3048   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3049 
3050   enum RC src_hi_rc = rc_class(src_hi);
3051   enum RC src_lo_rc = rc_class(src_lo);
3052   enum RC dst_hi_rc = rc_class(dst_hi);
3053   enum RC dst_lo_rc = rc_class(dst_lo);
3054 
3055   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3056 
3057   if (src_hi != OptoReg::Bad) {
3058     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3059            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3060            "expected aligned-adjacent pairs");
3061   }
3062 
3063   if (src_lo == dst_lo && src_hi == dst_hi) {
3064     return 0;            // Self copy, no move.
3065   }
3066 
3067   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3068               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3069   int src_offset = ra_->reg2offset(src_lo);
3070   int dst_offset = ra_->reg2offset(dst_lo);
3071 
3072   if (bottom_type()->isa_vect() != NULL) {
3073     uint ireg = ideal_reg();
3074     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3075     if (cbuf) {
3076       MacroAssembler _masm(cbuf);
3077       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3078       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3079         // stack->stack
3080         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3081         if (ireg == Op_VecD) {
3082           __ unspill(rscratch1, true, src_offset);
3083           __ spill(rscratch1, true, dst_offset);
3084         } else {
3085           __ spill_copy128(src_offset, dst_offset);
3086         }
3087       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3088         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3089                ireg == Op_VecD ? __ T8B : __ T16B,
3090                as_FloatRegister(Matcher::_regEncode[src_lo]));
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3092         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3093                        ireg == Op_VecD ? __ D : __ Q,
3094                        ra_->reg2offset(dst_lo));
3095       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3096         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3097                        ireg == Op_VecD ? __ D : __ Q,
3098                        ra_->reg2offset(src_lo));
3099       } else {
3100         ShouldNotReachHere();
3101       }
3102     }
3103   } else if (cbuf) {
3104     MacroAssembler _masm(cbuf);
3105     switch (src_lo_rc) {
3106     case rc_int:
3107       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3108         if (is64) {
3109             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3110                    as_Register(Matcher::_regEncode[src_lo]));
3111         } else {
3112             MacroAssembler _masm(cbuf);
3113             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3114                     as_Register(Matcher::_regEncode[src_lo]));
3115         }
3116       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3117         if (is64) {
3118             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3119                      as_Register(Matcher::_regEncode[src_lo]));
3120         } else {
3121             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3122                      as_Register(Matcher::_regEncode[src_lo]));
3123         }
3124       } else {                    // gpr --> stack spill
3125         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3126         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3127       }
3128       break;
3129     case rc_float:
3130       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3131         if (is64) {
3132             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3133                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3134         } else {
3135             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3136                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3137         }
3138       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3139           if (cbuf) {
3140             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3141                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3142         } else {
3143             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3144                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3145         }
3146       } else {                    // fpr --> stack spill
3147         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3148         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3149                  is64 ? __ D : __ S, dst_offset);
3150       }
3151       break;
3152     case rc_stack:
3153       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3154         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3155       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3156         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3157                    is64 ? __ D : __ S, src_offset);
3158       } else {                    // stack --> stack copy
3159         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3160         __ unspill(rscratch1, is64, src_offset);
3161         __ spill(rscratch1, is64, dst_offset);
3162       }
3163       break;
3164     default:
3165       assert(false, "bad rc_class for spill");
3166       ShouldNotReachHere();
3167     }
3168   }
3169 
3170   if (st) {
3171     st->print("spill ");
3172     if (src_lo_rc == rc_stack) {
3173       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3174     } else {
3175       st->print("%s -> ", Matcher::regName[src_lo]);
3176     }
3177     if (dst_lo_rc == rc_stack) {
3178       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3179     } else {
3180       st->print("%s", Matcher::regName[dst_lo]);
3181     }
3182     if (bottom_type()->isa_vect() != NULL) {
3183       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3184     } else {
3185       st->print("\t# spill size = %d", is64 ? 64:32);
3186     }
3187   }
3188 
3189   return 0;
3190 
3191 }
3192 
3193 #ifndef PRODUCT
3194 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3195   if (!ra_)
3196     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3197   else
3198     implementation(NULL, ra_, false, st);
3199 }
3200 #endif
3201 
3202 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3203   implementation(&cbuf, ra_, false, NULL);
3204 }
3205 
3206 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
3207   return MachNode::size(ra_);
3208 }
3209 
3210 //=============================================================================
3211 
3212 #ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // Debug listing: show the add that materializes the box lock's
  // stack address into its register.
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
3219 #endif
3220 
3221 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3222   MacroAssembler _masm(&cbuf);
3223 
3224   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3225   int reg    = ra_->get_encode(this);
3226 
3227   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3228     __ add(as_Register(reg), sp, offset);
3229   } else {
3230     ShouldNotReachHere();
3231   }
3232 }
3233 
3234 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
3235   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
3236   return 4;
3237 }
3238 
3239 //=============================================================================
3240 
3241 #ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  // Debug listing of the unverified entry point: load the receiver
  // klass, compare against the cached klass, branch to the IC miss
  // stub on mismatch (see MachUEPNode::emit).
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
3256 #endif
3257 
3258 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
3259 {
3260   // This is the unverified entry point.
3261   MacroAssembler _masm(&cbuf);
3262 
3263   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
3264   Label skip;
3265   // TODO
3266   // can we avoid this skip and still use a reloc?
3267   __ br(Assembler::EQ, skip);
3268   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
3269   __ bind(skip);
3270 }
3271 
3272 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
3273 {
3274   return MachNode::size(ra_);
3275 }
3276 
3277 // REQUIRED EMIT CODE
3278 
3279 //=============================================================================
3280 
3281 // Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // Emit the exception handler stub: a single far jump to the
  // exception blob's entry point, i.e.
  //   mov rscratch1 #exception_blob_entry_point
  //   br rscratch1
  // Returns the handler's offset in the stub section, or 0 on failure.
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3300 
3301 // Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Emit the deoptimization handler stub: capture the return address
  // in lr then far-jump to the deopt blob's unpack entry.
  // Returns the handler's offset in the stub section, or 0 on failure.
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr must point at the instruction after this adr
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3321 
3322 // REQUIRED MATCHER CODE
3323 
3324 //=============================================================================
3325 
3326 const bool Matcher::match_rule_supported(int opcode) {
3327 
3328   // TODO
3329   // identify extra cases that we might want to provide match rules for
3330   // e.g. Op_StrEquals and other intrinsics
3331   if (!has_match_rule(opcode)) {
3332     return false;
3333   }
3334 
3335   return true;  // Per default match rules are supported.
3336 }
3337 
3338 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3339 
3340   // TODO
3341   // identify extra cases that we might want to provide match rules for
3342   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3343   bool ret_value = match_rule_supported(opcode);
3344   // Add rules here.
3345 
3346   return ret_value;  // Per default match rules are supported.
3347 }
3348 
3349 const int Matcher::float_pressure(int default_pressure_threshold) {
3350   return default_pressure_threshold;
3351 }
3352 
3353 int Matcher::regnum_to_fpu_offset(int regnum)
3354 {
3355   Unimplemented();
3356   return 0;
3357 }
3358 
3359 // Is this branch offset short enough that a short branch can be used?
3360 //
3361 // NOTE: If the platform does not provide any short branch variants, then
3362 //       this method should return false for offset 0.
3363 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3364   // The passed offset is relative to address of the branch.
3365 
3366   return (-32768 <= offset && offset < 32768);
3367 }
3368 
3369 const bool Matcher::isSimpleConstant64(jlong value) {
3370   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
3371   // Probably always true, even if a temp register is required.
3372   return true;
3373 }
3374 
3375 // true just means we have fast l2f conversion
3376 const bool Matcher::convL2FSupported(void) {
3377   return true;
3378 }
3379 
3380 // Vector width in bytes.
3381 const int Matcher::vector_width_in_bytes(BasicType bt) {
3382   int size = MIN2(16,(int)MaxVectorSize);
3383   // Minimum 2 values in vector
3384   if (size < 2*type2aelembytes(bt)) size = 0;
3385   // But never < 4
3386   if (size < 4) size = 0;
3387   return size;
3388 }
3389 
3390 // Limits on vector size (number of elements) loaded into vector.
3391 const int Matcher::max_vector_size(const BasicType bt) {
3392   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3393 }
3394 const int Matcher::min_vector_size(const BasicType bt) {
3395 //  For the moment limit the vector size to 8 bytes
3396     int size = 8 / type2aelembytes(bt);
3397     if (size < 2) size = 2;
3398     return size;
3399 }
3400 
3401 // Vector ideal reg.
3402 const int Matcher::vector_ideal_reg(int len) {
3403   switch(len) {
3404     case  8: return Op_VecD;
3405     case 16: return Op_VecX;
3406   }
3407   ShouldNotReachHere();
3408   return 0;
3409 }
3410 
3411 const int Matcher::vector_shift_count_ideal_reg(int size) {
3412   return Op_VecX;
3413 }
3414 
3415 // AES support not yet implemented
3416 const bool Matcher::pass_original_key_for_aes() {
3417   return false;
3418 }
3419 
// AArch64 supports misaligned vectors store/load.
3421 const bool Matcher::misaligned_vectors_ok() {
3422   return !AlignVector; // can be changed by flag
3423 }
3424 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
// Extra cost of a long cmove over an int cmove (0 = no extra cost).
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

// Extra cost of a float cmove over an int cmove (0 = no extra cost).
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
3438 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// false: no explicit masking of shift counts is needed here.
const bool Matcher::need_masked_shift_count = false;
3450 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing is only usable when narrow oops are unscaled.
  return Universe::narrow_oop_shift() == 0;
}
3464 
// Same question as above, but for narrow klass pointers.
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3470 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// true: no split/fixup path is needed on this port.
const bool Matcher::misaligned_doubles_ok = true;
3483 
// No-op on amd64; on this port the path is unimplemented and must not
// be reached.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3488 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
// false: no explicit rounding is required on this port.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3502 
3503 // Return whether or not this register is ever used as an argument.
3504 // This function is used on startup to build the trampoline stubs in
3505 // generateOptoStub.  Registers not mentioned will be killed by the VM
3506 // call in the trampoline, and arguments in those registers not be
3507 // available to the callee.
3508 bool Matcher::can_be_java_arg(int reg)
3509 {
3510   return
3511     reg ==  R0_num || reg == R0_H_num ||
3512     reg ==  R1_num || reg == R1_H_num ||
3513     reg ==  R2_num || reg == R2_H_num ||
3514     reg ==  R3_num || reg == R3_H_num ||
3515     reg ==  R4_num || reg == R4_H_num ||
3516     reg ==  R5_num || reg == R5_H_num ||
3517     reg ==  R6_num || reg == R6_H_num ||
3518     reg ==  R7_num || reg == R7_H_num ||
3519     reg ==  V0_num || reg == V0_H_num ||
3520     reg ==  V1_num || reg == V1_H_num ||
3521     reg ==  V2_num || reg == V2_H_num ||
3522     reg ==  V3_num || reg == V3_H_num ||
3523     reg ==  V4_num || reg == V4_H_num ||
3524     reg ==  V5_num || reg == V5_H_num ||
3525     reg ==  V6_num || reg == V6_H_num ||
3526     reg ==  V7_num || reg == V7_H_num;
3527 }
3528 
// Any register that can carry a Java argument is also spillable.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
3533 
// Should long division by this constant be handled by a hand-written
// assembler path?  Not on this port.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3537 
// Register for DIVI projection of divmodI.
// None of these divmod projection masks is ever queried on this port;
// each is guarded by ShouldNotReachHere.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3560 
// Mask of the register used to preserve SP across a method handle
// invoke: the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3564 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

// Counts the call's parameters into gpcnt/fpcnt and classifies the
// return type into one of the MacroAssembler::ret_type_* codes;
// results are delivered through the reference out-parameters.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
    // NOTE(review): there is no break above, so T_FLOAT/T_DOUBLE
    // parameters fall through and also increment gps -- confirm this
    // double count is what the simulator blrt convention expects.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    // anything not matched below is returned as an integral value
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3606 
// Emit a volatile (acquire/release) access via the MacroAssembler
// member INSN (an ldar*/stlr* variant).  These instructions only take a
// plain base register, so any operand carrying an index, scale or
// displacement is rejected by the guarantees.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3615 
// Member-function-pointer types used to hand the concrete load/store
// emitter into the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3620 
3621   // Used for all non-volatile memory accesses.  The use of
3622   // $mem->opcode() to discover whether this pattern uses sign-extended
3623   // offsets is something of a kludge.
3624   static void loadStore(MacroAssembler masm, mem_insn insn,
3625                          Register reg, int opcode,
3626                          Register base, int index, int size, int disp)
3627   {
3628     Address::extend scale;
3629 
3630     // Hooboy, this is fugly.  We need a way to communicate to the
3631     // encoder that the index needs to be sign extended, so we have to
3632     // enumerate all the cases.
3633     switch (opcode) {
3634     case INDINDEXSCALEDOFFSETI2L:
3635     case INDINDEXSCALEDI2L:
3636     case INDINDEXSCALEDOFFSETI2LN:
3637     case INDINDEXSCALEDI2LN:
3638     case INDINDEXOFFSETI2L:
3639     case INDINDEXOFFSETI2LN:
3640       scale = Address::sxtw(size);
3641       break;
3642     default:
3643       scale = Address::lsl(size);
3644     }
3645 
3646     if (index == -1) {
3647       (masm.*insn)(reg, Address(base, disp));
3648     } else {
3649       if (disp == 0) {
3650         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3651       } else {
3652         masm.lea(rscratch1, Address(base, disp));
3653         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3654       }
3655     }
3656   }
3657 
3658   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3659                          FloatRegister reg, int opcode,
3660                          Register base, int index, int size, int disp)
3661   {
3662     Address::extend scale;
3663 
3664     switch (opcode) {
3665     case INDINDEXSCALEDOFFSETI2L:
3666     case INDINDEXSCALEDI2L:
3667     case INDINDEXSCALEDOFFSETI2LN:
3668     case INDINDEXSCALEDI2LN:
3669       scale = Address::sxtw(size);
3670       break;
3671     default:
3672       scale = Address::lsl(size);
3673     }
3674 
3675      if (index == -1) {
3676       (masm.*insn)(reg, Address(base, disp));
3677     } else {
3678       if (disp == 0) {
3679         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3680       } else {
3681         masm.lea(rscratch1, Address(base, disp));
3682         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3683       }
3684     }
3685   }
3686 
3687   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3688                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3689                          int opcode, Register base, int index, int size, int disp)
3690   {
3691     if (index == -1) {
3692       (masm.*insn)(reg, T, Address(base, disp));
3693     } else {
3694       assert(disp == 0, "unsupported address mode");
3695       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3696     }
3697   }
3698 
3699 %}
3700 
3701 
3702 
3703 //----------ENCODING BLOCK-----------------------------------------------------
3704 // This block specifies the encoding classes used by the compiler to
3705 // output byte streams.  Encoding classes are parameterized macros
3706 // used by Machine Instruction Nodes in order to generate the bit
3707 // encoding of the instruction.  Operands specify their base encoding
3708 // interface with the interface keyword.  There are currently
3709 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3710 // COND_INTER.  REG_INTER causes an operand to generate a function
3711 // which returns its register number when queried.  CONST_INTER causes
3712 // an operand to generate a function which returns the value of the
3713 // constant when queried.  MEMORY_INTER causes an operand to generate
3714 // four functions which return the Base Register, the Index Register,
3715 // the Scale Value, and the Offset Value of the operand when queried.
3716 // COND_INTER causes an operand to generate six functions which return
3717 // the encoding code (ie - encoding bits for the instruction)
3718 // associated with each basic boolean condition for a conditional
3719 // instruction.
3720 //
3721 // Instructions specify two basic values for encoding.  Again, a
3722 // function is available to check if the constant displacement is an
3723 // oop. They use the ins_encode keyword to specify their encoding
3724 // classes (which must be a sequence of enc_class names, and their
3725 // parameters, specified in the encoding block), and they use the
3726 // opcode keyword to specify, in order, their primary, secondary, and
3727 // tertiary opcode.  Only the opcode sections which a particular
3728 // instruction needs for encoding need to be specified.
3729 encode %{
3730   // Build emit functions for each basic byte or larger field in the
3731   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3732   // from C++ code in the enc_class source block.  Emit functions will
3733   // live in the main source block for now.  In future, we can
3734   // generalize this by adding a syntax that specifies the sizes of
3735   // fields in an order, so that the adlc can build the emit functions
3736   // automagically
3737 
  // catch all for unimplemented encodings: emits code that reports an
  // unimplemented C2 encoding if it is ever executed
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3743 
  // BEGIN Non-volatile memory access
  //
  // Each encoding below forwards to a loadStore() helper, passing the
  // matched memory operand's opcode so the helper can pick the right
  // index-extension mode.  The MacroAssembler member pointer selects
  // the concrete instruction; the dst operand type (iRegI/iRegL/
  // vRegF/vRegD/vecD/vecX) selects the destination register class.

  // ldrsbw: load sign-extended byte, 32-bit destination
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsb: load sign-extended byte, 64-bit destination form
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load zero-extended byte into an int
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrb: load zero-extended byte into a long
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrshw: load sign-extended halfword, 32-bit destination
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsh: load sign-extended halfword, 64-bit destination form
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load zero-extended halfword into an int
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrh: load zero-extended halfword into a long
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: load 32-bit word into an int
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrw: load 32-bit word (zero-extended) into a long
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrsw: load sign-extended 32-bit word into a long
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldr: load 64-bit doubleword
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrs: load 32-bit float
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // ldrd: load 64-bit double
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector loads: S/D/Q variants select the 32/64/128-bit SIMD width
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3847 
  // Store encodings: mirror the loads above.  The *0 variants store the
  // zero register (zr) and so take no src operand.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte, preceded by a StoreStore barrier
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not encodable as a str source), so copy it via rscratch2
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // vector stores: S/D/Q variants select the 32/64/128-bit SIMD width
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3940 
3941   // END Non-volatile memory access
3942 
  // volatile loads and stores
  //
  // These use MOV_VOLATILE, which accepts only a plain base-register
  // address and emits the store-release (stlr*) instruction named in
  // the last argument.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
3959 
3960 
  // Load-acquire encodings.  ldarb/ldarh zero-extend, so the signed
  // variants re-sign-extend the loaded value afterwards.

  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // float/double acquire loads go through an integer scratch register
  // and are then moved into the FP register with fmov
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4035 
  // 64-bit store-release; FP variants move the value through rscratch2
  // first because stlr only takes an integer source register.

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not encodable as an stlr source), so copy it via rscratch2
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4069 
  // synchronized read/update encodings

  // load-acquire exclusive: ldaxr needs a plain base register, so any
  // index/displacement is folded into rscratch1 with lea first
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp first, then apply the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4100 
  // store-release exclusive: mirrors ldaxr above; rscratch1 receives the
  // store-exclusive status (0 = success)
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp first, then apply the scaled index
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // set the condition flags from the status so callers can branch on
    // store-exclusive success (EQ <=> rscratch1 == 0)
    __ cmpw(rscratch1, zr);
  %}
4130 
  // compare-and-swap, 64-bit (xword) and 32-bit (word) forms; the
  // address must be a bare base register
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true);
  %}
4144 
4145 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true);
  %}
4163 
4164 
4165   // auxiliary used for CompareAndSwapX to set result register
4166   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4167     MacroAssembler _masm(&cbuf);
4168     Register res_reg = as_Register($res$$reg);
4169     __ cset(res_reg, Assembler::EQ);
4170   %}
4171 
4172   // prefetch encodings
4173 
4174   enc_class aarch64_enc_prefetchw(memory mem) %{
4175     MacroAssembler _masm(&cbuf);
4176     Register base = as_Register($mem$$base);
4177     int index = $mem$$index;
4178     int scale = $mem$$scale;
4179     int disp = $mem$$disp;
4180     if (index == -1) {
4181       __ prfm(Address(base, disp), PSTL1KEEP);
4182     } else {
4183       Register index_reg = as_Register(index);
4184       if (disp == 0) {
4185         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
4186       } else {
4187         __ lea(rscratch1, Address(base, disp));
4188         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
4189       }
4190     }
4191   %}
4192 
  // Zero a word-aligned region of cnt words starting at base, using an
  // 8-way unrolled store loop entered via a computed jump (Duff's
  // device) to handle the cnt % 8 remainder.  Clobbers cnt, base,
  // rscratch1 and rscratch2.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Computed jump into the unrolled store sequence: each str below is
    // one 4-byte instruction, hence the LSL 2 on the remainder count.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
4241 
  /// mov encodings
4243 
4244   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4245     MacroAssembler _masm(&cbuf);
4246     u_int32_t con = (u_int32_t)$src$$constant;
4247     Register dst_reg = as_Register($dst$$reg);
4248     if (con == 0) {
4249       __ movw(dst_reg, zr);
4250     } else {
4251       __ movw(dst_reg, con);
4252     }
4253   %}
4254 
4255   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4256     MacroAssembler _masm(&cbuf);
4257     Register dst_reg = as_Register($dst$$reg);
4258     u_int64_t con = (u_int64_t)$src$$constant;
4259     if (con == 0) {
4260       __ mov(dst_reg, zr);
4261     } else {
4262       __ mov(dst_reg, con);
4263     }
4264   %}
4265 
  // Load a pointer constant, choosing the relocation-aware loading
  // sequence based on the constant's reloc type.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // 0 and 1 are matched by the dedicated mov_p0/mov_p1 encodings.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // Small enough to be a plain immediate.
          __ mov(dst_reg, con);
        } else {
          // adrp + add reaches any address with a pc-relative pair.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4290 
4291   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4292     MacroAssembler _masm(&cbuf);
4293     Register dst_reg = as_Register($dst$$reg);
4294     __ mov(dst_reg, zr);
4295   %}
4296 
4297   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4298     MacroAssembler _masm(&cbuf);
4299     Register dst_reg = as_Register($dst$$reg);
4300     __ mov(dst_reg, (u_int64_t)1);
4301   %}
4302 
  // Load the address of the safepoint polling page with a pc-relative
  // adrp carrying a poll_type relocation; the low 12-bit offset is
  // asserted to be zero (the page is expected to be page-aligned).
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}
4311 
  // Load the byte map base (card table base) via the dedicated
  // MacroAssembler helper.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4316 
  // Load a narrow (compressed) oop constant with an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // Null narrow oops are matched by aarch64_enc_mov_n0 instead.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4329 
4330   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4331     MacroAssembler _masm(&cbuf);
4332     Register dst_reg = as_Register($dst$$reg);
4333     __ mov(dst_reg, zr);
4334   %}
4335 
  // Load a narrow (compressed) klass constant with a metadata
  // relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // A null klass constant should never be matched here.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4348 
4349   // arithmetic encodings
4350 
4351   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4352     MacroAssembler _masm(&cbuf);
4353     Register dst_reg = as_Register($dst$$reg);
4354     Register src_reg = as_Register($src1$$reg);
4355     int32_t con = (int32_t)$src2$$constant;
4356     // add has primary == 0, subtract has primary == 1
4357     if ($primary) { con = -con; }
4358     if (con < 0) {
4359       __ subw(dst_reg, src_reg, -con);
4360     } else {
4361       __ addw(dst_reg, src_reg, con);
4362     }
4363   %}
4364 
4365   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4366     MacroAssembler _masm(&cbuf);
4367     Register dst_reg = as_Register($dst$$reg);
4368     Register src_reg = as_Register($src1$$reg);
4369     int32_t con = (int32_t)$src2$$constant;
4370     // add has primary == 0, subtract has primary == 1
4371     if ($primary) { con = -con; }
4372     if (con < 0) {
4373       __ sub(dst_reg, src_reg, -con);
4374     } else {
4375       __ add(dst_reg, src_reg, con);
4376     }
4377   %}
4378 
4379   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4380     MacroAssembler _masm(&cbuf);
4381    Register dst_reg = as_Register($dst$$reg);
4382    Register src1_reg = as_Register($src1$$reg);
4383    Register src2_reg = as_Register($src2$$reg);
4384     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4385   %}
4386 
4387   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4388     MacroAssembler _masm(&cbuf);
4389    Register dst_reg = as_Register($dst$$reg);
4390    Register src1_reg = as_Register($src1$$reg);
4391    Register src2_reg = as_Register($src2$$reg);
4392     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4393   %}
4394 
4395   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4396     MacroAssembler _masm(&cbuf);
4397    Register dst_reg = as_Register($dst$$reg);
4398    Register src1_reg = as_Register($src1$$reg);
4399    Register src2_reg = as_Register($src2$$reg);
4400     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4401   %}
4402 
4403   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4404     MacroAssembler _masm(&cbuf);
4405    Register dst_reg = as_Register($dst$$reg);
4406    Register src1_reg = as_Register($src1$$reg);
4407    Register src2_reg = as_Register($src2$$reg);
4408     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4409   %}
4410 
4411   // compare instruction encodings
4412 
4413   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4414     MacroAssembler _masm(&cbuf);
4415     Register reg1 = as_Register($src1$$reg);
4416     Register reg2 = as_Register($src2$$reg);
4417     __ cmpw(reg1, reg2);
4418   %}
4419 
4420   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4421     MacroAssembler _masm(&cbuf);
4422     Register reg = as_Register($src1$$reg);
4423     int32_t val = $src2$$constant;
4424     if (val >= 0) {
4425       __ subsw(zr, reg, val);
4426     } else {
4427       __ addsw(zr, reg, -val);
4428     }
4429   %}
4430 
4431   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4432     MacroAssembler _masm(&cbuf);
4433     Register reg1 = as_Register($src1$$reg);
4434     u_int32_t val = (u_int32_t)$src2$$constant;
4435     __ movw(rscratch1, val);
4436     __ cmpw(reg1, rscratch1);
4437   %}
4438 
4439   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4440     MacroAssembler _masm(&cbuf);
4441     Register reg1 = as_Register($src1$$reg);
4442     Register reg2 = as_Register($src2$$reg);
4443     __ cmp(reg1, reg2);
4444   %}
4445 
  // Compare a 64-bit register against a 12-bit add/sub immediate.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // val == -val only for Long.MIN_VALUE, which cannot be negated;
      // materialize it in rscratch1 and compare register-register.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4460 
4461   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4462     MacroAssembler _masm(&cbuf);
4463     Register reg1 = as_Register($src1$$reg);
4464     u_int64_t val = (u_int64_t)$src2$$constant;
4465     __ mov(rscratch1, val);
4466     __ cmp(reg1, rscratch1);
4467   %}
4468 
4469   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4470     MacroAssembler _masm(&cbuf);
4471     Register reg1 = as_Register($src1$$reg);
4472     Register reg2 = as_Register($src2$$reg);
4473     __ cmp(reg1, reg2);
4474   %}
4475 
4476   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4477     MacroAssembler _masm(&cbuf);
4478     Register reg1 = as_Register($src1$$reg);
4479     Register reg2 = as_Register($src2$$reg);
4480     __ cmpw(reg1, reg2);
4481   %}
4482 
4483   enc_class aarch64_enc_testp(iRegP src) %{
4484     MacroAssembler _masm(&cbuf);
4485     Register reg = as_Register($src$$reg);
4486     __ cmp(reg, zr);
4487   %}
4488 
4489   enc_class aarch64_enc_testn(iRegN src) %{
4490     MacroAssembler _masm(&cbuf);
4491     Register reg = as_Register($src$$reg);
4492     __ cmpw(reg, zr);
4493   %}
4494 
4495   enc_class aarch64_enc_b(label lbl) %{
4496     MacroAssembler _masm(&cbuf);
4497     Label *L = $lbl$$label;
4498     __ b(*L);
4499   %}
4500 
4501   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4502     MacroAssembler _masm(&cbuf);
4503     Label *L = $lbl$$label;
4504     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4505   %}
4506 
4507   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4508     MacroAssembler _masm(&cbuf);
4509     Label *L = $lbl$$label;
4510     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4511   %}
4512 
  // Slow-path klass subtype check via
  // MacroAssembler::check_klass_subtype_slow_path.  Falls through on
  // success; branches to 'miss' on failure.  The instruct's primary
  // opcode selects the variant that also zeroes the result register
  // on success.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4530 
  // Emit a static (or optimized-virtual) Java call via a trampoline.
  // Records a bailout instead of emitting a call when the code cache
  // cannot hold the trampoline or the to-interpreter stub.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      // Pick the relocation matching the dispatch kind: opt_virtual for
      // optimized-virtual calls, static otherwise.
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    // trampoline_call returns NULL if there was no room for the
    // trampoline; record the failure and bail out.
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4557 
4558   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4559     MacroAssembler _masm(&cbuf);
4560     int method_index = resolved_method_index(cbuf);
4561     address call = __ ic_call((address)$meth$$method, method_index);
4562     if (call == NULL) {
4563       ciEnv::current()->record_failure("CodeCache is full");
4564       return;
4565     }
4566   %}
4567 
  // Emitted after a Java call.  Stack-depth verification is not
  // implemented on AArch64 (call_Unimplemented traps if enabled).
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4575 
  // Call from compiled Java code into the runtime.  In-code-cache
  // targets get a trampoline call; out-of-cache targets use blrt with
  // an explicit last-Java-frame breadcrumb pushed on the stack.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache, so a trampoline call reaches it.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: use the absolute-address call.
      // blrt needs the argument/return info extracted from the call's
      // type function tf().
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4606 
  // Jump to the shared rethrow stub; far_jump is used because the stub
  // may be outside the direct branch range.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4611 
  // Return to the caller via the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4616 
4617   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4618     MacroAssembler _masm(&cbuf);
4619     Register target_reg = as_Register($jump_target$$reg);
4620     __ br(target_reg);
4621   %}
4622 
4623   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4624     MacroAssembler _masm(&cbuf);
4625     Register target_reg = as_Register($jump_target$$reg);
4626     // exception oop should be in r0
4627     // ret addr has been popped into lr
4628     // callee expects it in r3
4629     __ mov(r3, lr);
4630     __ br(target_reg);
4631   %}
4632 
  // Fast-path monitor enter for the FastLock node.  On exit the
  // condition flags encode the result: EQ => lock acquired on the fast
  // path, NE => fall back to the runtime (see the trailing comments).
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this compare always yields NE
      // (failure), forcing the runtime path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      Label retry_load;
      __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; execution continues at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If the masked difference is zero, the mark holds an address in our
    // own stack frame, so we can store 0 as the displaced header in the
    // box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4785 
4786   // TODO
4787   // reimplement this with custom cmpxchgptr code
4788   // which avoids some of the unnecessary branching
  // Fast-path monitor exit for the FastUnlock node.  On exit the
  // condition flags encode the result: EQ => unlocked on the fast
  // path, NE => fall back to the runtime.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        Label retry_load;
        __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp sets the flags consumed at cont; cbnz branches there
      // with NE (failure) when either list is non-empty.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4883 
4884 %}
4885 
4886 //----------FRAME--------------------------------------------------------------
4887 // Definition of frame structure and management information.
4888 //
4889 //  S T A C K   L A Y O U T    Allocators stack-slot number
4890 //                             |   (to get allocators register number
4891 //  G  Owned by    |        |  v    add OptoReg::stack0())
4892 //  r   CALLER     |        |
4893 //  o     |        +--------+      pad to even-align allocators stack-slot
4894 //  w     V        |  pad0  |        numbers; owned by CALLER
4895 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4896 //  h     ^        |   in   |  5
4897 //        |        |  args  |  4   Holes in incoming args owned by SELF
4898 //  |     |        |        |  3
4899 //  |     |        +--------+
4900 //  V     |        | old out|      Empty on Intel, window on Sparc
4901 //        |    old |preserve|      Must be even aligned.
4902 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4903 //        |        |   in   |  3   area for Intel ret address
4904 //     Owned by    |preserve|      Empty on Sparc.
4905 //       SELF      +--------+
4906 //        |        |  pad2  |  2   pad to align old SP
4907 //        |        +--------+  1
4908 //        |        | locks  |  0
4909 //        |        +--------+----> OptoReg::stack0(), even aligned
4910 //        |        |  pad1  | 11   pad to align new SP
4911 //        |        +--------+
4912 //        |        |        | 10
4913 //        |        | spills |  9   spills
4914 //        V        |        |  8   (pad0 slot for callee)
4915 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4916 //        ^        |  out   |  7
4917 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4918 //     Owned by    +--------+
4919 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4920 //        |    new |preserve|      Must be even-aligned.
4921 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4922 //        |        |        |
4923 //
4924 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4925 //         known from SELF's arguments and the Java calling convention.
4926 //         Region 6-7 is determined per call site.
4927 // Note 2: If the calling convention leaves holes in the incoming argument
4928 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4933 //         varargs C calling conventions.
4934 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4935 //         even aligned with pad0 as needed.
4936 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4937 //           (the latter is true on Intel but is it false on AArch64?)
4938 //         region 6-11 is even aligned; it may be padded out more so that
4939 //         the region from SP to FP meets the minimum stack alignment.
4940 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4941 //         alignment.  Region 11, pad1, may be dynamically extended so that
4942 //         SP meets the minimum alignment.
4943 
4944 frame %{
4945   // What direction does stack grow in (assumed to be same for C & Java)
4946   stack_direction(TOWARDS_LOW);
4947 
4948   // These three registers define part of the calling convention
4949   // between compiled code and the interpreter.
4950 
4951   // Inline Cache Register or methodOop for I2C.
4952   inline_cache_reg(R12);
4953 
4954   // Method Oop Register when calling interpreter.
4955   interpreter_method_oop_reg(R12);
4956 
4957   // Number of stack slots consumed by locking an object
4958   sync_stack_slots(2);
4959 
4960   // Compiled code's Frame Pointer
4961   frame_pointer(R31);
4962 
4963   // Interpreter stores its frame pointer in a register which is
4964   // stored to the stack by I2CAdaptors.
4965   // I2CAdaptors convert from interpreted java to compiled java.
4966   interpreter_frame_pointer(R29);
4967 
4968   // Stack alignment requirement
4969   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
4970 
4971   // Number of stack slots between incoming argument block and the start of
4972   // a new frame.  The PROLOG must add this many slots to the stack.  The
4973   // EPILOG must remove this many slots. aarch64 needs two slots for
4974   // return address and fp.
4975   // TODO think this is correct but check
4976   in_preserve_stack_slots(4);
4977 
4978   // Number of outgoing stack slots killed above the out_preserve_stack_slots
4979   // for calls to C.  Supports the var-args backing area for register parms.
4980   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
4981 
4982   // The after-PROLOG location of the return address.  Location of
4983   // return address specifies a type (REG or STACK) and a number
4984   // representing the register number (i.e. - use a register name) or
4985   // stack slot.
4986   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
4987   // Otherwise, it is above the locks and verification slot and alignment word
4988   // TODO this may well be correct but need to check why that - 2 is there
4989   // ppc port uses 0 but we definitely need to allow for fixed_slots
4990   // which folds in the space used for monitors
4991   return_addr(STACK - 2 +
4992               round_to((Compile::current()->in_preserve_stack_slots() +
4993                         Compile::current()->fixed_slots()),
4994                        stack_alignment_in_slots()));
4995 
4996   // Body of function which returns an integer array locating
4997   // arguments either in registers or in stack slots.  Passed an array
4998   // of ideal registers called "sig" and a "length" count.  Stack-slot
4999   // offsets are based on outgoing arguments, i.e. a CALLER setting up
5000   // arguments for a CALLEE.  Incoming stack arguments are
5001   // automatically biased by the preserve_stack_slots field above.
5002 
5003   calling_convention
5004   %{
5005     // No difference between ingoing/outgoing just pass false
5006     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
5007   %}
5008 
5009   c_calling_convention
5010   %{
5011     // This is obviously always outgoing
5012     (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
5013   %}
5014 
5015   // Location of compiled Java return values.  Same as C for now.
5016   return_value
5017   %{
5018     // TODO do we allow ideal_reg == Op_RegN???
5019     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
5020            "only return normal values");
5021 
5022     static const int lo[Op_RegL + 1] = { // enum name
5023       0,                                 // Op_Node
5024       0,                                 // Op_Set
5025       R0_num,                            // Op_RegN
5026       R0_num,                            // Op_RegI
5027       R0_num,                            // Op_RegP
5028       V0_num,                            // Op_RegF
5029       V0_num,                            // Op_RegD
5030       R0_num                             // Op_RegL
5031     };
5032 
5033     static const int hi[Op_RegL + 1] = { // enum name
5034       0,                                 // Op_Node
5035       0,                                 // Op_Set
5036       OptoReg::Bad,                       // Op_RegN
5037       OptoReg::Bad,                      // Op_RegI
5038       R0_H_num,                          // Op_RegP
5039       OptoReg::Bad,                      // Op_RegF
5040       V0_H_num,                          // Op_RegD
5041       R0_H_num                           // Op_RegL
5042     };
5043 
5044     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
5045   %}
5046 %}
5047 
5048 //----------ATTRIBUTES---------------------------------------------------------
5049 //----------Operand Attributes-------------------------------------------------
5050 op_attrib op_cost(1);        // Required cost attribute
5051 
5052 //----------Instruction Attributes---------------------------------------------
5053 ins_attrib ins_cost(INSN_COST); // Required cost attribute
5054 ins_attrib ins_size(32);        // Required size attribute (in bits)
5055 ins_attrib ins_short_branch(0); // Required flag: is this instruction
5056                                 // a non-matching short branch variant
5057                                 // of some long branch?
5058 ins_attrib ins_alignment(4);    // Required alignment attribute (must
5059                                 // be a power of 2) specifies the
5060                                 // alignment that some part of the
5061                                 // instruction (not necessarily the
5062                                 // start) requires.  If > 1, a
5063                                 // compute_padding() function must be
5064                                 // provided for the instruction
5065 
5066 //----------OPERANDS-----------------------------------------------------------
5067 // Operand definitions must precede instruction definitions for correct parsing
5068 // in the ADLC because operands constitute user defined types which are used in
5069 // instruction definitions.
5070 
5071 //----------Simple Operands----------------------------------------------------
5072 
5073 // Integer operands 32 bit
5074 // 32 bit immediate
5075 operand immI()
5076 %{
5077   match(ConI);
5078 
5079   op_cost(0);
5080   format %{ %}
5081   interface(CONST_INTER);
5082 %}
5083 
5084 // 32 bit zero
5085 operand immI0()
5086 %{
5087   predicate(n->get_int() == 0);
5088   match(ConI);
5089 
5090   op_cost(0);
5091   format %{ %}
5092   interface(CONST_INTER);
5093 %}
5094 
5095 // 32 bit unit increment
5096 operand immI_1()
5097 %{
5098   predicate(n->get_int() == 1);
5099   match(ConI);
5100 
5101   op_cost(0);
5102   format %{ %}
5103   interface(CONST_INTER);
5104 %}
5105 
5106 // 32 bit unit decrement
5107 operand immI_M1()
5108 %{
5109   predicate(n->get_int() == -1);
5110   match(ConI);
5111 
5112   op_cost(0);
5113   format %{ %}
5114   interface(CONST_INTER);
5115 %}
5116 
     // 32 bit integer no larger than 4 (note: value may also be negative)
5117 operand immI_le_4()
5118 %{
5119   predicate(n->get_int() <= 4);
5120   match(ConI);
5121 
5122   op_cost(0);
5123   format %{ %}
5124   interface(CONST_INTER);
5125 %}
5126 
     // 32 bit integer 31 (0x1f)
5127 operand immI_31()
5128 %{
5129   predicate(n->get_int() == 31);
5130   match(ConI);
5131 
5132   op_cost(0);
5133   format %{ %}
5134   interface(CONST_INTER);
5135 %}
5136 
5137 operand immI_8()
5138 %{
5139   predicate(n->get_int() == 8);
5140   match(ConI);
5141 
5142   op_cost(0);
5143   format %{ %}
5144   interface(CONST_INTER);
5145 %}
5146 
5147 operand immI_16()
5148 %{
5149   predicate(n->get_int() == 16);
5150   match(ConI);
5151 
5152   op_cost(0);
5153   format %{ %}
5154   interface(CONST_INTER);
5155 %}
5156 
5157 operand immI_24()
5158 %{
5159   predicate(n->get_int() == 24);
5160   match(ConI);
5161 
5162   op_cost(0);
5163   format %{ %}
5164   interface(CONST_INTER);
5165 %}
5166 
5167 operand immI_32()
5168 %{
5169   predicate(n->get_int() == 32);
5170   match(ConI);
5171 
5172   op_cost(0);
5173   format %{ %}
5174   interface(CONST_INTER);
5175 %}
5176 
5177 operand immI_48()
5178 %{
5179   predicate(n->get_int() == 48);
5180   match(ConI);
5181 
5182   op_cost(0);
5183   format %{ %}
5184   interface(CONST_INTER);
5185 %}
5186 
5187 operand immI_56()
5188 %{
5189   predicate(n->get_int() == 56);
5190   match(ConI);
5191 
5192   op_cost(0);
5193   format %{ %}
5194   interface(CONST_INTER);
5195 %}
5196 
5197 operand immI_64()
5198 %{
5199   predicate(n->get_int() == 64);
5200   match(ConI);
5201 
5202   op_cost(0);
5203   format %{ %}
5204   interface(CONST_INTER);
5205 %}
5206 
5207 operand immI_255()
5208 %{
5209   predicate(n->get_int() == 255);
5210   match(ConI);
5211 
5212   op_cost(0);
5213   format %{ %}
5214   interface(CONST_INTER);
5215 %}
5216 
5217 operand immI_65535()
5218 %{
5219   predicate(n->get_int() == 65535);
5220   match(ConI);
5221 
5222   op_cost(0);
5223   format %{ %}
5224   interface(CONST_INTER);
5225 %}
5226 
     // Constant 63
     // NOTE(review): despite the immL name this operand matches a ConI and
     // reads get_int() -- presumably matching the int-typed shift counts
     // applied to long values; confirm this is intended before changing it
     // to ConL/get_long(), as matching rules elsewhere depend on it.
5227 operand immL_63()
5228 %{
5229   predicate(n->get_int() == 63);
5230   match(ConI);
5231 
5232   op_cost(0);
5233   format %{ %}
5234   interface(CONST_INTER);
5235 %}
5236 
     // Constant 255 (0xff)
     // NOTE(review): as with immL_63 above, the immL name is inconsistent
     // with the declaration: this matches a ConI and reads get_int().
     // The genuinely long-typed variant is immL_65535/immL_4294967295
     // style (ConL + get_long()) below -- verify intent before "fixing".
5237 operand immL_255()
5238 %{
5239   predicate(n->get_int() == 255);
5240   match(ConI);
5241 
5242   op_cost(0);
5243   format %{ %}
5244   interface(CONST_INTER);
5245 %}
5246 
5247 operand immL_65535()
5248 %{
5249   predicate(n->get_long() == 65535L);
5250   match(ConL);
5251 
5252   op_cost(0);
5253   format %{ %}
5254   interface(CONST_INTER);
5255 %}
5256 
5257 operand immL_4294967295()
5258 %{
5259   predicate(n->get_long() == 4294967295L);
5260   match(ConL);
5261 
5262   op_cost(0);
5263   format %{ %}
5264   interface(CONST_INTER);
5265 %}
5266 
     // 64 bit mask of contiguous low-order set bits, i.e. a value of the
     // form 2^k - 1, with the top two bits (62, 63) required to be clear
5267 operand immL_bitmask()
5268 %{
5269   predicate(((n->get_long() & 0xc000000000000000l) == 0)
5270             && is_power_of_2(n->get_long() + 1));
5271   match(ConL);
5272 
5273   op_cost(0);
5274   format %{ %}
5275   interface(CONST_INTER);
5276 %}
5277 
     // 32 bit mask of contiguous low-order set bits (2^k - 1), with the
     // top two bits (30, 31) required to be clear
5278 operand immI_bitmask()
5279 %{
5280   predicate(((n->get_int() & 0xc0000000) == 0)
5281             && is_power_of_2(n->get_int() + 1));
5282   match(ConI);
5283 
5284   op_cost(0);
5285   format %{ %}
5286   interface(CONST_INTER);
5287 %}
5288 
5289 // Scale values for scaled offset addressing modes (up to long but not quad)
5290 operand immIScale()
5291 %{
5292   predicate(0 <= n->get_int() && (n->get_int() <= 3));
5293   match(ConI);
5294 
5295   op_cost(0);
5296   format %{ %}
5297   interface(CONST_INTER);
5298 %}
5299 
5300 // 26 bit signed offset -- for pc-relative branches
5301 operand immI26()
5302 %{
5303   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
5304   match(ConI);
5305 
5306   op_cost(0);
5307   format %{ %}
5308   interface(CONST_INTER);
5309 %}
5310 
5311 // 19 bit signed offset -- for pc-relative loads
5312 operand immI19()
5313 %{
5314   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
5315   match(ConI);
5316 
5317   op_cost(0);
5318   format %{ %}
5319   interface(CONST_INTER);
5320 %}
5321 
5322 // 12 bit unsigned offset -- for base plus immediate loads
5323 operand immIU12()
5324 %{
5325   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
5326   match(ConI);
5327 
5328   op_cost(0);
5329   format %{ %}
5330   interface(CONST_INTER);
5331 %}
5332 
5333 operand immLU12()
5334 %{
5335   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
5336   match(ConL);
5337 
5338   op_cost(0);
5339   format %{ %}
5340   interface(CONST_INTER);
5341 %}
5342 
5343 // Offset for scaled or unscaled immediate loads and stores
5344 operand immIOffset()
5345 %{
5346   predicate(Address::offset_ok_for_immed(n->get_int()));
5347   match(ConI);
5348 
5349   op_cost(0);
5350   format %{ %}
5351   interface(CONST_INTER);
5352 %}
5353 
5354 operand immLoffset()
5355 %{
5356   predicate(Address::offset_ok_for_immed(n->get_long()));
5357   match(ConL);
5358 
5359   op_cost(0);
5360   format %{ %}
5361   interface(CONST_INTER);
5362 %}
5363 
5364 // 32 bit integer valid for add sub immediate
5365 operand immIAddSub()
5366 %{
5367   predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
5368   match(ConI);
5369   op_cost(0);
5370   format %{ %}
5371   interface(CONST_INTER);
5372 %}
5373 
5374 // 32 bit unsigned integer valid for logical immediate
5375 // TODO -- check this is right when e.g the mask is 0x80000000
5376 operand immILog()
5377 %{
5378   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
5379   match(ConI);
5380 
5381   op_cost(0);
5382   format %{ %}
5383   interface(CONST_INTER);
5384 %}
5385 
5386 // Integer operands 64 bit
5387 // 64 bit immediate
5388 operand immL()
5389 %{
5390   match(ConL);
5391 
5392   op_cost(0);
5393   format %{ %}
5394   interface(CONST_INTER);
5395 %}
5396 
5397 // 64 bit zero
5398 operand immL0()
5399 %{
5400   predicate(n->get_long() == 0);
5401   match(ConL);
5402 
5403   op_cost(0);
5404   format %{ %}
5405   interface(CONST_INTER);
5406 %}
5407 
5408 // 64 bit unit increment
5409 operand immL_1()
5410 %{
5411   predicate(n->get_long() == 1);
5412   match(ConL);
5413 
5414   op_cost(0);
5415   format %{ %}
5416   interface(CONST_INTER);
5417 %}
5418 
5419 // 64 bit unit decrement
5420 operand immL_M1()
5421 %{
5422   predicate(n->get_long() == -1);
5423   match(ConL);
5424 
5425   op_cost(0);
5426   format %{ %}
5427   interface(CONST_INTER);
5428 %}
5429 
5430 // 32 bit offset of pc in thread anchor
5431 
5432 operand immL_pc_off()
5433 %{
5434   predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
5435                              in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
5436   match(ConL);
5437 
5438   op_cost(0);
5439   format %{ %}
5440   interface(CONST_INTER);
5441 %}
5442 
5443 // 64 bit integer valid for add sub immediate
5444 operand immLAddSub()
5445 %{
5446   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
5447   match(ConL);
5448   op_cost(0);
5449   format %{ %}
5450   interface(CONST_INTER);
5451 %}
5452 
5453 // 64 bit integer valid for logical immediate
5454 operand immLLog()
5455 %{
5456   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
5457   match(ConL);
5458   op_cost(0);
5459   format %{ %}
5460   interface(CONST_INTER);
5461 %}
5462 
5463 // Long Immediate: low 32-bit mask
5464 operand immL_32bits()
5465 %{
5466   predicate(n->get_long() == 0xFFFFFFFFL);
5467   match(ConL);
5468   op_cost(0);
5469   format %{ %}
5470   interface(CONST_INTER);
5471 %}
5472 
5473 // Pointer operands
5474 // Pointer Immediate
5475 operand immP()
5476 %{
5477   match(ConP);
5478 
5479   op_cost(0);
5480   format %{ %}
5481   interface(CONST_INTER);
5482 %}
5483 
5484 // NULL Pointer Immediate
5485 operand immP0()
5486 %{
5487   predicate(n->get_ptr() == 0);
5488   match(ConP);
5489 
5490   op_cost(0);
5491   format %{ %}
5492   interface(CONST_INTER);
5493 %}
5494 
5495 // Pointer Immediate One
5496 // this is used in object initialization (initial object header)
5497 operand immP_1()
5498 %{
5499   predicate(n->get_ptr() == 1);
5500   match(ConP);
5501 
5502   op_cost(0);
5503   format %{ %}
5504   interface(CONST_INTER);
5505 %}
5506 
5507 // Polling Page Pointer Immediate
5508 operand immPollPage()
5509 %{
5510   predicate((address)n->get_ptr() == os::get_polling_page());
5511   match(ConP);
5512 
5513   op_cost(0);
5514   format %{ %}
5515   interface(CONST_INTER);
5516 %}
5517 
5518 // Card Table Byte Map Base
5519 operand immByteMapBase()
5520 %{
5521   // Get base of card map
5522   predicate((jbyte*)n->get_ptr() ==
5523         ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
5524   match(ConP);
5525 
5526   op_cost(0);
5527   format %{ %}
5528   interface(CONST_INTER);
5529 %}
5530 
5531 // Pointer Immediate Minus One
5532 // this is used when we want to write the current PC to the thread anchor
5533 operand immP_M1()
5534 %{
5535   predicate(n->get_ptr() == -1);
5536   match(ConP);
5537 
5538   op_cost(0);
5539   format %{ %}
5540   interface(CONST_INTER);
5541 %}
5542 
5543 // Pointer Immediate Minus Two
5544 // this is used when we want to write the current PC to the thread anchor
5545 operand immP_M2()
5546 %{
5547   predicate(n->get_ptr() == -2);
5548   match(ConP);
5549 
5550   op_cost(0);
5551   format %{ %}
5552   interface(CONST_INTER);
5553 %}
5554 
5555 // Float and Double operands
5556 // Double Immediate
5557 operand immD()
5558 %{
5559   match(ConD);
5560   op_cost(0);
5561   format %{ %}
5562   interface(CONST_INTER);
5563 %}
5564 
5565 // Double Immediate: +0.0d
5566 operand immD0()
5567 %{
5568   predicate(jlong_cast(n->getd()) == 0);
5569   match(ConD);
5570 
5571   op_cost(0);
5572   format %{ %}
5573   interface(CONST_INTER);
5574 %}
5575 
5576 // Double immediate valid for the floating-point immediate encoding
5576 // (see Assembler::operand_valid_for_float_immediate).
5577 operand immDPacked()
5578 %{
5579   predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
5580   match(ConD);
5581   op_cost(0);
5582   format %{ %}
5583   interface(CONST_INTER);
5584 %}
5585 
5586 // Float Immediate
5587 operand immF()
5588 %{
5589   match(ConF);
5590   op_cost(0);
5591   format %{ %}
5592   interface(CONST_INTER);
5593 %}
5594 
5595 // Float Immediate: +0.0f.
5596 operand immF0()
5597 %{
5598   predicate(jint_cast(n->getf()) == 0);
5599   match(ConF);
5600 
5601   op_cost(0);
5602   format %{ %}
5603   interface(CONST_INTER);
5604 %}
5605 
5606 // Float immediate valid for the floating-point immediate encoding
5606 // (see Assembler::operand_valid_for_float_immediate).
5607 operand immFPacked()
5608 %{
5609   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
5610   match(ConF);
5611   op_cost(0);
5612   format %{ %}
5613   interface(CONST_INTER);
5614 %}
5615 
5616 // Narrow pointer operands
5617 // Narrow Pointer Immediate
5618 operand immN()
5619 %{
5620   match(ConN);
5621 
5622   op_cost(0);
5623   format %{ %}
5624   interface(CONST_INTER);
5625 %}
5626 
5627 // Narrow NULL Pointer Immediate
5628 operand immN0()
5629 %{
5630   predicate(n->get_narrowcon() == 0);
5631   match(ConN);
5632 
5633   op_cost(0);
5634   format %{ %}
5635   interface(CONST_INTER);
5636 %}
5637 
5638 operand immNKlass()
5639 %{
5640   match(ConNKlass);
5641 
5642   op_cost(0);
5643   format %{ %}
5644   interface(CONST_INTER);
5645 %}
5646 
5647 // Integer 32 bit Register Operands
5648 // Integer 32 bit Register (excludes SP)
5649 operand iRegI()
5650 %{
5651   constraint(ALLOC_IN_RC(any_reg32));
5652   match(RegI);
5653   match(iRegINoSp);
5654   op_cost(0);
5655   format %{ %}
5656   interface(REG_INTER);
5657 %}
5658 
5659 // Integer 32 bit Register not Special
5660 operand iRegINoSp()
5661 %{
5662   constraint(ALLOC_IN_RC(no_special_reg32));
5663   match(RegI);
5664   op_cost(0);
5665   format %{ %}
5666   interface(REG_INTER);
5667 %}
5668 
5669 // Integer 64 bit Register Operands
5670 // Integer 64 bit Register (includes SP)
5671 operand iRegL()
5672 %{
5673   constraint(ALLOC_IN_RC(any_reg));
5674   match(RegL);
5675   match(iRegLNoSp);
5676   op_cost(0);
5677   format %{ %}
5678   interface(REG_INTER);
5679 %}
5680 
5681 // Integer 64 bit Register not Special
5682 operand iRegLNoSp()
5683 %{
5684   constraint(ALLOC_IN_RC(no_special_reg));
5685   match(RegL);
5686   format %{ %}
5687   interface(REG_INTER);
5688 %}
5689 
5690 // Pointer Register Operands
5691 // Pointer Register
5692 operand iRegP()
5693 %{
5694   constraint(ALLOC_IN_RC(ptr_reg));
5695   match(RegP);
5696   match(iRegPNoSp);
5697   match(iRegP_R0);
5698   //match(iRegP_R2);
5699   //match(iRegP_R4);
5700   //match(iRegP_R5);
5701   match(thread_RegP);
5702   op_cost(0);
5703   format %{ %}
5704   interface(REG_INTER);
5705 %}
5706 
5707 // Pointer 64 bit Register not Special
5708 operand iRegPNoSp()
5709 %{
5710   constraint(ALLOC_IN_RC(no_special_ptr_reg));
5711   match(RegP);
5712   // match(iRegP);
5713   // match(iRegP_R0);
5714   // match(iRegP_R2);
5715   // match(iRegP_R4);
5716   // match(iRegP_R5);
5717   // match(thread_RegP);
5718   op_cost(0);
5719   format %{ %}
5720   interface(REG_INTER);
5721 %}
5722 
5723 // Pointer 64 bit Register R0 only
5724 operand iRegP_R0()
5725 %{
5726   constraint(ALLOC_IN_RC(r0_reg));
5727   match(RegP);
5728   // match(iRegP);
5729   match(iRegPNoSp);
5730   op_cost(0);
5731   format %{ %}
5732   interface(REG_INTER);
5733 %}
5734 
5735 // Pointer 64 bit Register R1 only
5736 operand iRegP_R1()
5737 %{
5738   constraint(ALLOC_IN_RC(r1_reg));
5739   match(RegP);
5740   // match(iRegP);
5741   match(iRegPNoSp);
5742   op_cost(0);
5743   format %{ %}
5744   interface(REG_INTER);
5745 %}
5746 
5747 // Pointer 64 bit Register R2 only
5748 operand iRegP_R2()
5749 %{
5750   constraint(ALLOC_IN_RC(r2_reg));
5751   match(RegP);
5752   // match(iRegP);
5753   match(iRegPNoSp);
5754   op_cost(0);
5755   format %{ %}
5756   interface(REG_INTER);
5757 %}
5758 
5759 // Pointer 64 bit Register R3 only
5760 operand iRegP_R3()
5761 %{
5762   constraint(ALLOC_IN_RC(r3_reg));
5763   match(RegP);
5764   // match(iRegP);
5765   match(iRegPNoSp);
5766   op_cost(0);
5767   format %{ %}
5768   interface(REG_INTER);
5769 %}
5770 
5771 // Pointer 64 bit Register R4 only
5772 operand iRegP_R4()
5773 %{
5774   constraint(ALLOC_IN_RC(r4_reg));
5775   match(RegP);
5776   // match(iRegP);
5777   match(iRegPNoSp);
5778   op_cost(0);
5779   format %{ %}
5780   interface(REG_INTER);
5781 %}
5782 
5783 // Pointer 64 bit Register R5 only
5784 operand iRegP_R5()
5785 %{
5786   constraint(ALLOC_IN_RC(r5_reg));
5787   match(RegP);
5788   // match(iRegP);
5789   match(iRegPNoSp);
5790   op_cost(0);
5791   format %{ %}
5792   interface(REG_INTER);
5793 %}
5794 
5795 // Pointer 64 bit Register R10 only
5796 operand iRegP_R10()
5797 %{
5798   constraint(ALLOC_IN_RC(r10_reg));
5799   match(RegP);
5800   // match(iRegP);
5801   match(iRegPNoSp);
5802   op_cost(0);
5803   format %{ %}
5804   interface(REG_INTER);
5805 %}
5806 
5807 // Long 64 bit Register R11 only
5808 operand iRegL_R11()
5809 %{
5810   constraint(ALLOC_IN_RC(r11_reg));
5811   match(RegL);
5812   match(iRegLNoSp);
5813   op_cost(0);
5814   format %{ %}
5815   interface(REG_INTER);
5816 %}
5817 
5818 // Pointer 64 bit Register FP only
5819 operand iRegP_FP()
5820 %{
5821   constraint(ALLOC_IN_RC(fp_reg));
5822   match(RegP);
5823   // match(iRegP);
5824   op_cost(0);
5825   format %{ %}
5826   interface(REG_INTER);
5827 %}
5828 
5829 // Register R0 only
5830 operand iRegI_R0()
5831 %{
5832   constraint(ALLOC_IN_RC(int_r0_reg));
5833   match(RegI);
5834   match(iRegINoSp);
5835   op_cost(0);
5836   format %{ %}
5837   interface(REG_INTER);
5838 %}
5839 
5840 // Register R2 only
5841 operand iRegI_R2()
5842 %{
5843   constraint(ALLOC_IN_RC(int_r2_reg));
5844   match(RegI);
5845   match(iRegINoSp);
5846   op_cost(0);
5847   format %{ %}
5848   interface(REG_INTER);
5849 %}
5850 
5851 // Register R3 only
5852 operand iRegI_R3()
5853 %{
5854   constraint(ALLOC_IN_RC(int_r3_reg));
5855   match(RegI);
5856   match(iRegINoSp);
5857   op_cost(0);
5858   format %{ %}
5859   interface(REG_INTER);
5860 %}
5861 
5862 
5863 // Register R4 only
5864 operand iRegI_R4()
5865 %{
5866   constraint(ALLOC_IN_RC(int_r4_reg));
5867   match(RegI);
5868   match(iRegINoSp);
5869   op_cost(0);
5870   format %{ %}
5871   interface(REG_INTER);
5872 %}
5873 
5874 
5875 // Pointer Register Operands
5876 // Narrow Pointer Register
5877 operand iRegN()
5878 %{
5879   constraint(ALLOC_IN_RC(any_reg32));
5880   match(RegN);
5881   match(iRegNNoSp);
5882   op_cost(0);
5883   format %{ %}
5884   interface(REG_INTER);
5885 %}
5886 
5887 // Narrow Pointer Register not Special
5888 operand iRegNNoSp()
5889 %{
5890   constraint(ALLOC_IN_RC(no_special_reg32));
5891   match(RegN);
5892   op_cost(0);
5893   format %{ %}
5894   interface(REG_INTER);
5895 %}
5896 
5897 // heap base register -- used for encoding immN0
5898 
5899 operand iRegIHeapbase()
5900 %{
5901   constraint(ALLOC_IN_RC(heapbase_reg));
5902   match(RegI);
5903   op_cost(0);
5904   format %{ %}
5905   interface(REG_INTER);
5906 %}
5907 
5908 // Float Register
5909 // Float register operands
5910 operand vRegF()
5911 %{
5912   constraint(ALLOC_IN_RC(float_reg));
5913   match(RegF);
5914 
5915   op_cost(0);
5916   format %{ %}
5917   interface(REG_INTER);
5918 %}
5919 
5920 // Double Register
5921 // Double register operands
5922 operand vRegD()
5923 %{
5924   constraint(ALLOC_IN_RC(double_reg));
5925   match(RegD);
5926 
5927   op_cost(0);
5928   format %{ %}
5929   interface(REG_INTER);
5930 %}
5931 
5932 operand vecD()
5933 %{
5934   constraint(ALLOC_IN_RC(vectord_reg));
5935   match(VecD);
5936 
5937   op_cost(0);
5938   format %{ %}
5939   interface(REG_INTER);
5940 %}
5941 
5942 operand vecX()
5943 %{
5944   constraint(ALLOC_IN_RC(vectorx_reg));
5945   match(VecX);
5946 
5947   op_cost(0);
5948   format %{ %}
5949   interface(REG_INTER);
5950 %}
5951 
5952 operand vRegD_V0()
5953 %{
5954   constraint(ALLOC_IN_RC(v0_reg));
5955   match(RegD);
5956   op_cost(0);
5957   format %{ %}
5958   interface(REG_INTER);
5959 %}
5960 
5961 operand vRegD_V1()
5962 %{
5963   constraint(ALLOC_IN_RC(v1_reg));
5964   match(RegD);
5965   op_cost(0);
5966   format %{ %}
5967   interface(REG_INTER);
5968 %}
5969 
5970 operand vRegD_V2()
5971 %{
5972   constraint(ALLOC_IN_RC(v2_reg));
5973   match(RegD);
5974   op_cost(0);
5975   format %{ %}
5976   interface(REG_INTER);
5977 %}
5978 
5979 operand vRegD_V3()
5980 %{
5981   constraint(ALLOC_IN_RC(v3_reg));
5982   match(RegD);
5983   op_cost(0);
5984   format %{ %}
5985   interface(REG_INTER);
5986 %}
5987 
5988 // Flags register, used as output of signed compare instructions
5989 
5990 // note that on AArch64 we also use this register as the output for
5991 // for floating point compare instructions (CmpF CmpD). this ensures
5992 // that ordered inequality tests use GT, GE, LT or LE none of which
5993 // pass through cases where the result is unordered i.e. one or both
5994 // inputs to the compare is a NaN. this means that the ideal code can
5995 // replace e.g. a GT with an LE and not end up capturing the NaN case
5996 // (where the comparison should always fail). EQ and NE tests are
5997 // always generated in ideal code so that unordered folds into the NE
5998 // case, matching the behaviour of AArch64 NE.
5999 //
6000 // This differs from x86 where the outputs of FP compares use a
6001 // special FP flags registers and where compares based on this
6002 // register are distinguished into ordered inequalities (cmpOpUCF) and
6003 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6004 // to explicitly handle the unordered case in branches. x86 also has
6005 // to include extra CMoveX rules to accept a cmpOpUCF input.
6006 
6007 operand rFlagsReg()
6008 %{
6009   constraint(ALLOC_IN_RC(int_flags));
6010   match(RegFlags);
6011 
6012   op_cost(0);
6013   format %{ "RFLAGS" %}
6014   interface(REG_INTER);
6015 %}
6016 
6017 // Flags register, used as output of unsigned compare instructions
6018 operand rFlagsRegU()
6019 %{
6020   constraint(ALLOC_IN_RC(int_flags));
6021   match(RegFlags);
6022 
6023   op_cost(0);
6024   format %{ "RFLAGSU" %}
6025   interface(REG_INTER);
6026 %}
6027 
6028 // Special Registers
6029 
6030 // Method Register
6031 operand inline_cache_RegP(iRegP reg)
6032 %{
6033   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
6034   match(reg);
6035   match(iRegPNoSp);
6036   op_cost(0);
6037   format %{ %}
6038   interface(REG_INTER);
6039 %}
6040 
6041 operand interpreter_method_oop_RegP(iRegP reg)
6042 %{
6043   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
6044   match(reg);
6045   match(iRegPNoSp);
6046   op_cost(0);
6047   format %{ %}
6048   interface(REG_INTER);
6049 %}
6050 
6051 // Thread Register
6052 operand thread_RegP(iRegP reg)
6053 %{
6054   constraint(ALLOC_IN_RC(thread_reg)); // link_reg
6055   match(reg);
6056   op_cost(0);
6057   format %{ %}
6058   interface(REG_INTER);
6059 %}
6060 
6061 operand lr_RegP(iRegP reg)
6062 %{
6063   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
6064   match(reg);
6065   op_cost(0);
6066   format %{ %}
6067   interface(REG_INTER);
6068 %}
6069 
6070 //----------Memory Operands----------------------------------------------------
6071 
6072 operand indirect(iRegP reg)
6073 %{
6074   constraint(ALLOC_IN_RC(ptr_reg));
6075   match(reg);
6076   op_cost(0);
6077   format %{ "[$reg]" %}
6078   interface(MEMORY_INTER) %{
6079     base($reg);
6080     index(0xffffffff);
6081     scale(0x0);
6082     disp(0x0);
6083   %}
6084 %}
6085 
6086 operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
6087 %{
6088   constraint(ALLOC_IN_RC(ptr_reg));
6089   match(AddP (AddP reg (LShiftL lreg scale)) off);
6090   op_cost(INSN_COST);
6091   format %{ "$reg, $lreg lsl($scale), $off" %}
6092   interface(MEMORY_INTER) %{
6093     base($reg);
6094     index($lreg);
6095     scale($scale);
6096     disp($off);
6097   %}
6098 %}
6099 
6100 operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
6101 %{
6102   constraint(ALLOC_IN_RC(ptr_reg));
6103   match(AddP (AddP reg (LShiftL lreg scale)) off);
6104   op_cost(INSN_COST);
6105   format %{ "$reg, $lreg lsl($scale), $off" %}
6106   interface(MEMORY_INTER) %{
6107     base($reg);
6108     index($lreg);
6109     scale($scale);
6110     disp($off);
6111   %}
6112 %}
6113 
6114 operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
6115 %{
6116   constraint(ALLOC_IN_RC(ptr_reg));
6117   match(AddP (AddP reg (ConvI2L ireg)) off);
6118   op_cost(INSN_COST);
6119   format %{ "$reg, $ireg, $off I2L" %}
6120   interface(MEMORY_INTER) %{
6121     base($reg);
6122     index($ireg);
6123     scale(0x0);
6124     disp($off);
6125   %}
6126 %}
6127 
6128 operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
6129 %{
6130   constraint(ALLOC_IN_RC(ptr_reg));
6131   match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
6132   op_cost(INSN_COST);
6133   format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
6134   interface(MEMORY_INTER) %{
6135     base($reg);
6136     index($ireg);
6137     scale($scale);
6138     disp($off);
6139   %}
6140 %}
6141 
6142 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
6143 %{
6144   constraint(ALLOC_IN_RC(ptr_reg));
6145   match(AddP reg (LShiftL (ConvI2L ireg) scale));
6146   op_cost(0);
6147   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
6148   interface(MEMORY_INTER) %{
6149     base($reg);
6150     index($ireg);
6151     scale($scale);
6152     disp(0x0);
6153   %}
6154 %}
6155 
6156 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
6157 %{
6158   constraint(ALLOC_IN_RC(ptr_reg));
6159   match(AddP reg (LShiftL lreg scale));
6160   op_cost(0);
6161   format %{ "$reg, $lreg lsl($scale)" %}
6162   interface(MEMORY_INTER) %{
6163     base($reg);
6164     index($lreg);
6165     scale($scale);
6166     disp(0x0);
6167   %}
6168 %}
6169 
6170 operand indIndex(iRegP reg, iRegL lreg)
6171 %{
6172   constraint(ALLOC_IN_RC(ptr_reg));
6173   match(AddP reg lreg);
6174   op_cost(0);
6175   format %{ "$reg, $lreg" %}
6176   interface(MEMORY_INTER) %{
6177     base($reg);
6178     index($lreg);
6179     scale(0x0);
6180     disp(0x0);
6181   %}
6182 %}
6183 
6184 operand indOffI(iRegP reg, immIOffset off)
6185 %{
6186   constraint(ALLOC_IN_RC(ptr_reg));
6187   match(AddP reg off);
6188   op_cost(0);
6189   format %{ "[$reg, $off]" %}
6190   interface(MEMORY_INTER) %{
6191     base($reg);
6192     index(0xffffffff);
6193     scale(0x0);
6194     disp($off);
6195   %}
6196 %}
6197 
6198 operand indOffL(iRegP reg, immLoffset off)
6199 %{
6200   constraint(ALLOC_IN_RC(ptr_reg));
6201   match(AddP reg off);
6202   op_cost(0);
6203   format %{ "[$reg, $off]" %}
6204   interface(MEMORY_INTER) %{
6205     base($reg);
6206     index(0xffffffff);
6207     scale(0x0);
6208     disp($off);
6209   %}
6210 %}
6211 
6212 
6213 operand indirectN(iRegN reg)
6214 %{
6215   predicate(Universe::narrow_oop_shift() == 0);
6216   constraint(ALLOC_IN_RC(ptr_reg));
6217   match(DecodeN reg);
6218   op_cost(0);
6219   format %{ "[$reg]\t# narrow" %}
6220   interface(MEMORY_INTER) %{
6221     base($reg);
6222     index(0xffffffff);
6223     scale(0x0);
6224     disp(0x0);
6225   %}
6226 %}
6227 
6228 operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
6229 %{
6230   predicate(Universe::narrow_oop_shift() == 0);
6231   constraint(ALLOC_IN_RC(ptr_reg));
6232   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
6233   op_cost(0);
6234   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
6235   interface(MEMORY_INTER) %{
6236     base($reg);
6237     index($lreg);
6238     scale($scale);
6239     disp($off);
6240   %}
6241 %}
6242 
// As indIndexScaledOffsetIN but with an unsigned 12-bit long offset.
// NOTE(review): op_cost is INSN_COST here while the otherwise-identical
// indIndexScaledOffsetIN uses op_cost(0) — confirm this is intentional.
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}
6257 
// Narrow-oop base plus sign-extended int index (ConvI2L folded into
// the addressing mode) plus unsigned 12-bit long offset.
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}
6272 
// Narrow-oop base plus sign-extended, scaled int index plus unsigned
// 12-bit long offset.
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}
6287 
// Narrow-oop base plus sign-extended, scaled int index; no offset.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}
6302 
// Narrow-oop base plus scaled long index; no offset.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
6317 
// Narrow-oop base plus unscaled long index; no offset.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
6332 
// Narrow-oop base plus constant int offset; no index register.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
6347 
// Narrow-oop base plus constant long offset; no index register.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
6362 
6363 
6364 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Addressing form: thread register plus the fixed pc-slot offset
// (immL_pc_off); no index register.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
6379 
6380 //----------Special Memory Operands--------------------------------------------
6381 // Stack Slot Operand - This operand is used for loading and storing temporary
6382 //                      values on the stack where a match requires a value to
6383 //                      flow through memory.
// Stack slot holding a pointer value spilled during matching.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP (encoding inherited from the x86 template; the
                 // stack pointer on this port)
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6398 
// Stack slot holding an int value spilled during matching.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6412 
// Stack slot holding a float value spilled during matching.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6426 
// Stack slot holding a double value spilled during matching.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6440 
// Stack slot holding a long value spilled during matching.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6454 
6455 // Operands for expressing Control Flow
6456 // NOTE: Label is a predefined operand which should not be redefined in
6457 //       the AD file. It is generically handled within the ADLC.
6458 
6459 //----------Conditional Branch Operands----------------------------------------
6460 // Comparison Op  - This is the operation of the comparison, and is limited to
6461 //                  the following set of codes:
6462 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6463 //
6464 // Other attributes of the comparison, such as unsignedness, are specified
6465 // by the comparison instruction that sets a condition code flags register.
6466 // That result is represented by a flags operand whose subtype is appropriate
6467 // to the unsignedness (etc.) of the comparison.
6468 //
6469 // Later, the instruction which matches both the Comparison Op (a Bool) and
6470 // the flags (produced by the Cmp) specifies the coding of the comparison op
6471 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6472 
// used for signed integral comparisons and fp comparisons
// The hex values are the AArch64 condition-code encodings for the
// mnemonic shown (e.g. lt = 0xb, ge = 0xa).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6491 
// used for unsigned integral comparisons
// Same structure as cmpOp but using the unsigned AArch64 condition
// codes (lo/hs/ls/hi) for the ordering relations.

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6510 
// Special operand allowing long args to int ops to be truncated for free
// Matching (ConvL2I reg) directly lets 32-bit instructions consume the
// low half of a long register without emitting an explicit movw.

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
6523 
6524 opclass vmem(indirect, indIndex, indOffI, indOffL);
6525 
6526 //----------OPERAND CLASSES----------------------------------------------------
6527 // Operand Classes are groups of operands that are used as to simplify
6528 // instruction definitions by not requiring the AD writer to specify
6529 // separate instructions for every form of operand when the
6530 // instruction accepts multiple operand types with the same basic
6531 // encoding and format. The classic case of this is memory operands.
6532 
// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
// First line: plain pointer-base forms; second line: the matching
// narrow-oop (N-suffixed) forms.

opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
6538 
6539 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6554 
6555 //----------PIPELINE-----------------------------------------------------------
6556 // Rules which define the behavior of the target architectures pipeline.
6557 
// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Readable aliases for the first four generic stages declared in
// pipe_desc(S0..S5) below: issue, execute-1, execute-2, write-back.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6564 
6565 // Integer ALU reg operation
6566 pipeline %{
6567 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6580 
6581 // We don't use an actual pipeline model so don't care about resources
6582 // or description. we do use pipeline classes to introduce fixed
6583 // latencies
6584 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS0/INS1 model the two issue slots of the dual-issue core (INS01 =
// either slot); ALU0/ALU1 the two integer ALUs (ALU = either).
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
6595 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
// (ISS/EX1/EX2/WR above are #define aliases for S0-S3.)
pipe_desc(S0, S1, S2, S3, S4, S5);
6601 
6602 //----------PIPELINE CLASSES---------------------------------------------------
6603 // Pipeline Classes describe the stages in which input and output are
6604 // referenced by the hardware pipeline.
6605 
// FP op with two register sources, single precision: sources read at
// S1/S2, result written at S5, issues to either slot.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP op with two register sources, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP op with one register source, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP op with one register source, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6661 
// FP convert float -> int (result in a general register).
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> long.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> float.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> float.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> int.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> long.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> double.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> double.
// NOTE(review): src is iRegIorL2I here while fp_l2f takes iRegL —
// confirm the int-or-truncated-long type is intended for the long form.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6733 
// FP divide, single precision. Uses INS0: can only issue in slot 0.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision. Slot-0-only issue.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
6753 
// FP conditional select, single precision: reads the flags plus both
// sources at S1, result at S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move of an encodable immediate, single precision (no sources).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move of an encodable immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant materialization, single precision; result at S4.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant materialization, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6807 
// Vector multiply, 64-bit (D-register) form; dual-issue capable.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit (Q-register) form; slot-0-only issue.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit form: dst is also read (the
// accumulator input), hence the extra dst : S1(read).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit form; slot-0-only issue.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6849 
// Vector two-source integer op, 64-bit form; result at S4.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector two-source integer op, 128-bit form; slot-0-only issue.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit form; shorter latency (result at S3).
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit form; slot-0-only issue.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6889 
// Vector shift by register, 64-bit form (shift amounts held in a vecX).
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit form; slot-0-only issue.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit form (immediate needs no read stage).
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit form; slot-0-only issue.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6927 
// Vector FP two-source op, 64-bit form.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP two-source op, 128-bit form; slot-0-only issue.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit form; slot-0-only issue.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit form; slot-0-only issue.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit form; slot-0-only issue.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP one-source op, 64-bit form.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP one-source op, 128-bit form; slot-0-only issue.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6994 
// Duplicate a general register into all lanes, 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes, 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into all lanes, 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit vector (no source operands).
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit vector; slot-0-only issue.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7055 
// Vector load from memory into a 64-bit vector register.
pipe_class vload_reg_mem64(vecD dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load from memory into a 128-bit vector register.
pipe_class vload_reg_mem128(vecX dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7073 
// Vector store of a 64-bit vector register to memory.
pipe_class vstore_reg_mem64(vecD src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7082 
// Vector store of a 128-bit vector register to memory.
// Fix: the source of the 128-bit store is a quad (vecX) register; the
// original declared vecD, inconsistent with every other *128 pipe
// class in this file (vmul128, vlogical128, vload_reg_mem128, ...).
pipe_class vstore_reg_mem128(vecX src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7091 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written at EX2 but the ALU resource is held at
// EX1, unlike the other EX2-result classes — confirm intentional.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7189 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7216 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSINC   X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7254 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7307 
//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7333 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7367 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Here "dst" is the register forming part of the address (read at ISS)
// and "src" is the value being stored.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7401 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7430 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7454 
7455 // Empty pipeline class
7456 pipe_class pipe_class_empty()
7457 %{
7458   single_instruction;
7459   fixed_latency(0);
7460 %}
7461 
7462 // Default pipeline class.
7463 pipe_class pipe_class_default()
7464 %{
7465   single_instruction;
7466   fixed_latency(2);
7467 %}
7468 
7469 // Pipeline class for compares.
7470 pipe_class pipe_class_compare()
7471 %{
7472   single_instruction;
7473   fixed_latency(16);
7474 %}
7475 
7476 // Pipeline class for memory operations.
7477 pipe_class pipe_class_memory()
7478 %{
7479   single_instruction;
7480   fixed_latency(16);
7481 %}
7482 
7483 // Pipeline class for call.
7484 pipe_class pipe_class_call()
7485 %{
7486   single_instruction;
7487   fixed_latency(100);
7488 %}
7489 
7490 // Define the class for the Nop node.
7491 define %{
7492    MachNop = pipe_class_empty;
7493 %}
7494 
7495 %}
7496 //----------INSTRUCTIONS-------------------------------------------------------
7497 //
7498 // match      -- States which machine-independent subtree may be replaced
7499 //               by this instruction.
7500 // ins_cost   -- The estimated cost of this instruction is used by instruction
7501 //               selection to identify a minimum cost tree of machine
7502 //               instructions that matches a tree of machine-independent
7503 //               instructions.
7504 // format     -- A string providing the disassembly for this instruction.
7505 //               The value of an instruction's operand may be inserted
7506 //               by referring to it with a '$' prefix.
7507 // opcode     -- Three instruction opcodes may be provided.  These are referred
7508 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7510 //               indicate the type of machine instruction, while secondary
7511 //               and tertiary are often used for prefix options or addressing
7512 //               modes.
7513 // ins_encode -- A list of encode classes with parameters. The encode class
7514 //               name must have been defined in an 'enc_class' specification
7515 //               in the encode section of the architecture description.
7516 
7517 // ============================================================================
7518 // Memory (Load/Store) Instructions
7519 
7520 // Load Instructions
7521 
7522 // Load Byte (8 bit signed)
7523 instruct loadB(iRegINoSp dst, memory mem)
7524 %{
7525   match(Set dst (LoadB mem));
7526   predicate(!needs_acquiring_load(n));
7527 
7528   ins_cost(4 * INSN_COST);
7529   format %{ "ldrsbw  $dst, $mem\t# byte" %}
7530 
7531   ins_encode(aarch64_enc_ldrsbw(dst, mem));
7532 
7533   ins_pipe(iload_reg_mem);
7534 %}
7535 
7536 // Load Byte (8 bit signed) into long
7537 instruct loadB2L(iRegLNoSp dst, memory mem)
7538 %{
7539   match(Set dst (ConvI2L (LoadB mem)));
7540   predicate(!needs_acquiring_load(n->in(1)));
7541 
7542   ins_cost(4 * INSN_COST);
7543   format %{ "ldrsb  $dst, $mem\t# byte" %}
7544 
7545   ins_encode(aarch64_enc_ldrsb(dst, mem));
7546 
7547   ins_pipe(iload_reg_mem);
7548 %}
7549 
7550 // Load Byte (8 bit unsigned)
7551 instruct loadUB(iRegINoSp dst, memory mem)
7552 %{
7553   match(Set dst (LoadUB mem));
7554   predicate(!needs_acquiring_load(n));
7555 
7556   ins_cost(4 * INSN_COST);
7557   format %{ "ldrbw  $dst, $mem\t# byte" %}
7558 
7559   ins_encode(aarch64_enc_ldrb(dst, mem));
7560 
7561   ins_pipe(iload_reg_mem);
7562 %}
7563 
7564 // Load Byte (8 bit unsigned) into long
7565 instruct loadUB2L(iRegLNoSp dst, memory mem)
7566 %{
7567   match(Set dst (ConvI2L (LoadUB mem)));
7568   predicate(!needs_acquiring_load(n->in(1)));
7569 
7570   ins_cost(4 * INSN_COST);
7571   format %{ "ldrb  $dst, $mem\t# byte" %}
7572 
7573   ins_encode(aarch64_enc_ldrb(dst, mem));
7574 
7575   ins_pipe(iload_reg_mem);
7576 %}
7577 
7578 // Load Short (16 bit signed)
7579 instruct loadS(iRegINoSp dst, memory mem)
7580 %{
7581   match(Set dst (LoadS mem));
7582   predicate(!needs_acquiring_load(n));
7583 
7584   ins_cost(4 * INSN_COST);
7585   format %{ "ldrshw  $dst, $mem\t# short" %}
7586 
7587   ins_encode(aarch64_enc_ldrshw(dst, mem));
7588 
7589   ins_pipe(iload_reg_mem);
7590 %}
7591 
7592 // Load Short (16 bit signed) into long
7593 instruct loadS2L(iRegLNoSp dst, memory mem)
7594 %{
7595   match(Set dst (ConvI2L (LoadS mem)));
7596   predicate(!needs_acquiring_load(n->in(1)));
7597 
7598   ins_cost(4 * INSN_COST);
7599   format %{ "ldrsh  $dst, $mem\t# short" %}
7600 
7601   ins_encode(aarch64_enc_ldrsh(dst, mem));
7602 
7603   ins_pipe(iload_reg_mem);
7604 %}
7605 
7606 // Load Char (16 bit unsigned)
7607 instruct loadUS(iRegINoSp dst, memory mem)
7608 %{
7609   match(Set dst (LoadUS mem));
7610   predicate(!needs_acquiring_load(n));
7611 
7612   ins_cost(4 * INSN_COST);
7613   format %{ "ldrh  $dst, $mem\t# short" %}
7614 
7615   ins_encode(aarch64_enc_ldrh(dst, mem));
7616 
7617   ins_pipe(iload_reg_mem);
7618 %}
7619 
7620 // Load Short/Char (16 bit unsigned) into long
7621 instruct loadUS2L(iRegLNoSp dst, memory mem)
7622 %{
7623   match(Set dst (ConvI2L (LoadUS mem)));
7624   predicate(!needs_acquiring_load(n->in(1)));
7625 
7626   ins_cost(4 * INSN_COST);
7627   format %{ "ldrh  $dst, $mem\t# short" %}
7628 
7629   ins_encode(aarch64_enc_ldrh(dst, mem));
7630 
7631   ins_pipe(iload_reg_mem);
7632 %}
7633 
7634 // Load Integer (32 bit signed)
7635 instruct loadI(iRegINoSp dst, memory mem)
7636 %{
7637   match(Set dst (LoadI mem));
7638   predicate(!needs_acquiring_load(n));
7639 
7640   ins_cost(4 * INSN_COST);
7641   format %{ "ldrw  $dst, $mem\t# int" %}
7642 
7643   ins_encode(aarch64_enc_ldrw(dst, mem));
7644 
7645   ins_pipe(iload_reg_mem);
7646 %}
7647 
7648 // Load Integer (32 bit signed) into long
7649 instruct loadI2L(iRegLNoSp dst, memory mem)
7650 %{
7651   match(Set dst (ConvI2L (LoadI mem)));
7652   predicate(!needs_acquiring_load(n->in(1)));
7653 
7654   ins_cost(4 * INSN_COST);
7655   format %{ "ldrsw  $dst, $mem\t# int" %}
7656 
7657   ins_encode(aarch64_enc_ldrsw(dst, mem));
7658 
7659   ins_pipe(iload_reg_mem);
7660 %}
7661 
7662 // Load Integer (32 bit unsigned) into long
7663 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
7664 %{
7665   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
7666   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
7667 
7668   ins_cost(4 * INSN_COST);
7669   format %{ "ldrw  $dst, $mem\t# int" %}
7670 
7671   ins_encode(aarch64_enc_ldrw(dst, mem));
7672 
7673   ins_pipe(iload_reg_mem);
7674 %}
7675 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed format comment: this is a 64-bit long load, not an int load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7689 
7690 // Load Range
7691 instruct loadRange(iRegINoSp dst, memory mem)
7692 %{
7693   match(Set dst (LoadRange mem));
7694 
7695   ins_cost(4 * INSN_COST);
7696   format %{ "ldrw  $dst, $mem\t# range" %}
7697 
7698   ins_encode(aarch64_enc_ldrw(dst, mem));
7699 
7700   ins_pipe(iload_reg_mem);
7701 %}
7702 
7703 // Load Pointer
7704 instruct loadP(iRegPNoSp dst, memory mem)
7705 %{
7706   match(Set dst (LoadP mem));
7707   predicate(!needs_acquiring_load(n));
7708 
7709   ins_cost(4 * INSN_COST);
7710   format %{ "ldr  $dst, $mem\t# ptr" %}
7711 
7712   ins_encode(aarch64_enc_ldr(dst, mem));
7713 
7714   ins_pipe(iload_reg_mem);
7715 %}
7716 
7717 // Load Compressed Pointer
7718 instruct loadN(iRegNNoSp dst, memory mem)
7719 %{
7720   match(Set dst (LoadN mem));
7721   predicate(!needs_acquiring_load(n));
7722 
7723   ins_cost(4 * INSN_COST);
7724   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
7725 
7726   ins_encode(aarch64_enc_ldrw(dst, mem));
7727 
7728   ins_pipe(iload_reg_mem);
7729 %}
7730 
7731 // Load Klass Pointer
7732 instruct loadKlass(iRegPNoSp dst, memory mem)
7733 %{
7734   match(Set dst (LoadKlass mem));
7735   predicate(!needs_acquiring_load(n));
7736 
7737   ins_cost(4 * INSN_COST);
7738   format %{ "ldr  $dst, $mem\t# class" %}
7739 
7740   ins_encode(aarch64_enc_ldr(dst, mem));
7741 
7742   ins_pipe(iload_reg_mem);
7743 %}
7744 
7745 // Load Narrow Klass Pointer
7746 instruct loadNKlass(iRegNNoSp dst, memory mem)
7747 %{
7748   match(Set dst (LoadNKlass mem));
7749   predicate(!needs_acquiring_load(n));
7750 
7751   ins_cost(4 * INSN_COST);
7752   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7753 
7754   ins_encode(aarch64_enc_ldrw(dst, mem));
7755 
7756   ins_pipe(iload_reg_mem);
7757 %}
7758 
7759 // Load Float
7760 instruct loadF(vRegF dst, memory mem)
7761 %{
7762   match(Set dst (LoadF mem));
7763   predicate(!needs_acquiring_load(n));
7764 
7765   ins_cost(4 * INSN_COST);
7766   format %{ "ldrs  $dst, $mem\t# float" %}
7767 
7768   ins_encode( aarch64_enc_ldrs(dst, mem) );
7769 
7770   ins_pipe(pipe_class_memory);
7771 %}
7772 
7773 // Load Double
7774 instruct loadD(vRegD dst, memory mem)
7775 %{
7776   match(Set dst (LoadD mem));
7777   predicate(!needs_acquiring_load(n));
7778 
7779   ins_cost(4 * INSN_COST);
7780   format %{ "ldrd  $dst, $mem\t# double" %}
7781 
7782   ins_encode( aarch64_enc_ldrd(dst, mem) );
7783 
7784   ins_pipe(pipe_class_memory);
7785 %}
7786 
7787 
7788 // Load Int Constant
7789 instruct loadConI(iRegINoSp dst, immI src)
7790 %{
7791   match(Set dst src);
7792 
7793   ins_cost(INSN_COST);
7794   format %{ "mov $dst, $src\t# int" %}
7795 
7796   ins_encode( aarch64_enc_movw_imm(dst, src) );
7797 
7798   ins_pipe(ialu_imm);
7799 %}
7800 
7801 // Load Long Constant
7802 instruct loadConL(iRegLNoSp dst, immL src)
7803 %{
7804   match(Set dst src);
7805 
7806   ins_cost(INSN_COST);
7807   format %{ "mov $dst, $src\t# long" %}
7808 
7809   ins_encode( aarch64_enc_mov_imm(dst, src) );
7810 
7811   ins_pipe(ialu_imm);
7812 %}
7813 
7814 // Load Pointer Constant
7815 
7816 instruct loadConP(iRegPNoSp dst, immP con)
7817 %{
7818   match(Set dst con);
7819 
7820   ins_cost(INSN_COST * 4);
7821   format %{
7822     "mov  $dst, $con\t# ptr\n\t"
7823   %}
7824 
7825   ins_encode(aarch64_enc_mov_p(dst, con));
7826 
7827   ins_pipe(ialu_imm);
7828 %}
7829 
7830 // Load Null Pointer Constant
7831 
7832 instruct loadConP0(iRegPNoSp dst, immP0 con)
7833 %{
7834   match(Set dst con);
7835 
7836   ins_cost(INSN_COST);
7837   format %{ "mov  $dst, $con\t# NULL ptr" %}
7838 
7839   ins_encode(aarch64_enc_mov_p0(dst, con));
7840 
7841   ins_pipe(ialu_imm);
7842 %}
7843 
7844 // Load Pointer Constant One
7845 
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // immP_1 is the pointer constant one, not NULL; the old format comment
  // (copied from loadConP0) said "NULL ptr".
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7857 
7858 // Load Poll Page Constant
7859 
7860 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
7861 %{
7862   match(Set dst con);
7863 
7864   ins_cost(INSN_COST);
7865   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
7866 
7867   ins_encode(aarch64_enc_mov_poll_page(dst, con));
7868 
7869   ins_pipe(ialu_imm);
7870 %}
7871 
7872 // Load Byte Map Base Constant
7873 
7874 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
7875 %{
7876   match(Set dst con);
7877 
7878   ins_cost(INSN_COST);
7879   format %{ "adr  $dst, $con\t# Byte Map Base" %}
7880 
7881   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
7882 
7883   ins_pipe(ialu_imm);
7884 %}
7885 
7886 // Load Narrow Pointer Constant
7887 
7888 instruct loadConN(iRegNNoSp dst, immN con)
7889 %{
7890   match(Set dst con);
7891 
7892   ins_cost(INSN_COST * 4);
7893   format %{ "mov  $dst, $con\t# compressed ptr" %}
7894 
7895   ins_encode(aarch64_enc_mov_n(dst, con));
7896 
7897   ins_pipe(ialu_imm);
7898 %}
7899 
7900 // Load Narrow Null Pointer Constant
7901 
7902 instruct loadConN0(iRegNNoSp dst, immN0 con)
7903 %{
7904   match(Set dst con);
7905 
7906   ins_cost(INSN_COST);
7907   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
7908 
7909   ins_encode(aarch64_enc_mov_n0(dst, con));
7910 
7911   ins_pipe(ialu_imm);
7912 %}
7913 
7914 // Load Narrow Klass Constant
7915 
7916 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
7917 %{
7918   match(Set dst con);
7919 
7920   ins_cost(INSN_COST);
7921   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
7922 
7923   ins_encode(aarch64_enc_mov_nk(dst, con));
7924 
7925   ins_pipe(ialu_imm);
7926 %}
7927 
7928 // Load Packed Float Constant
7929 
7930 instruct loadConF_packed(vRegF dst, immFPacked con) %{
7931   match(Set dst con);
7932   ins_cost(INSN_COST * 4);
7933   format %{ "fmovs  $dst, $con"%}
7934   ins_encode %{
7935     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
7936   %}
7937 
7938   ins_pipe(fp_imm_s);
7939 %}
7940 
7941 // Load Float Constant
7942 
7943 instruct loadConF(vRegF dst, immF con) %{
7944   match(Set dst con);
7945 
7946   ins_cost(INSN_COST * 4);
7947 
7948   format %{
7949     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
7950   %}
7951 
7952   ins_encode %{
7953     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
7954   %}
7955 
7956   ins_pipe(fp_load_constant_s);
7957 %}
7958 
7959 // Load Packed Double Constant
7960 
7961 instruct loadConD_packed(vRegD dst, immDPacked con) %{
7962   match(Set dst con);
7963   ins_cost(INSN_COST);
7964   format %{ "fmovd  $dst, $con"%}
7965   ins_encode %{
7966     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
7967   %}
7968 
7969   ins_pipe(fp_imm_d);
7970 %}
7971 
// Load Double Constant

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed format comment: this loads a double constant, not a float.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7988 
7989 // Store Instructions
7990 
7991 // Store CMS card-mark Immediate
7992 instruct storeimmCM0(immI0 zero, memory mem)
7993 %{
7994   match(Set mem (StoreCM mem zero));
7995   predicate(unnecessary_storestore(n));
7996 
7997   ins_cost(INSN_COST);
7998   format %{ "strb zr, $mem\t# byte" %}
7999 
8000   ins_encode(aarch64_enc_strb0(mem));
8001 
8002   ins_pipe(istore_mem);
8003 %}
8004 
8005 // Store CMS card-mark Immediate with intervening StoreStore
8006 // needed when using CMS with no conditional card marking
8007 instruct storeimmCM0_ordered(immI0 zero, memory mem)
8008 %{
8009   match(Set mem (StoreCM mem zero));
8010 
8011   ins_cost(INSN_COST * 2);
8012   format %{ "dmb ishst"
8013       "\n\tstrb zr, $mem\t# byte" %}
8014 
8015   ins_encode(aarch64_enc_strb0_ordered(mem));
8016 
8017   ins_pipe(istore_mem);
8018 %}
8019 
8020 // Store Byte
8021 instruct storeB(iRegIorL2I src, memory mem)
8022 %{
8023   match(Set mem (StoreB mem src));
8024   predicate(!needs_releasing_store(n));
8025 
8026   ins_cost(INSN_COST);
8027   format %{ "strb  $src, $mem\t# byte" %}
8028 
8029   ins_encode(aarch64_enc_strb(src, mem));
8030 
8031   ins_pipe(istore_reg_mem);
8032 %}
8033 
8034 
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // aarch64_enc_strb0 stores the zero register; the previous format string
  // showed a misspelled "rscractch2" which is not what is emitted.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8047 
8048 // Store Char/Short
8049 instruct storeC(iRegIorL2I src, memory mem)
8050 %{
8051   match(Set mem (StoreC mem src));
8052   predicate(!needs_releasing_store(n));
8053 
8054   ins_cost(INSN_COST);
8055   format %{ "strh  $src, $mem\t# short" %}
8056 
8057   ins_encode(aarch64_enc_strh(src, mem));
8058 
8059   ins_pipe(istore_reg_mem);
8060 %}
8061 
8062 instruct storeimmC0(immI0 zero, memory mem)
8063 %{
8064   match(Set mem (StoreC mem zero));
8065   predicate(!needs_releasing_store(n));
8066 
8067   ins_cost(INSN_COST);
8068   format %{ "strh  zr, $mem\t# short" %}
8069 
8070   ins_encode(aarch64_enc_strh0(mem));
8071 
8072   ins_pipe(istore_mem);
8073 %}
8074 
8075 // Store Integer
8076 
8077 instruct storeI(iRegIorL2I src, memory mem)
8078 %{
8079   match(Set mem(StoreI mem src));
8080   predicate(!needs_releasing_store(n));
8081 
8082   ins_cost(INSN_COST);
8083   format %{ "strw  $src, $mem\t# int" %}
8084 
8085   ins_encode(aarch64_enc_strw(src, mem));
8086 
8087   ins_pipe(istore_reg_mem);
8088 %}
8089 
8090 instruct storeimmI0(immI0 zero, memory mem)
8091 %{
8092   match(Set mem(StoreI mem zero));
8093   predicate(!needs_releasing_store(n));
8094 
8095   ins_cost(INSN_COST);
8096   format %{ "strw  zr, $mem\t# int" %}
8097 
8098   ins_encode(aarch64_enc_strw0(mem));
8099 
8100   ins_pipe(istore_mem);
8101 %}
8102 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format comment: 64-bit long store, not an int store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
8116 
// Store Long immediate zero (64 bit)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed format comment: 64-bit long store of zr, not an int store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8130 
8131 // Store Pointer
8132 instruct storeP(iRegP src, memory mem)
8133 %{
8134   match(Set mem (StoreP mem src));
8135   predicate(!needs_releasing_store(n));
8136 
8137   ins_cost(INSN_COST);
8138   format %{ "str  $src, $mem\t# ptr" %}
8139 
8140   ins_encode(aarch64_enc_str(src, mem));
8141 
8142   ins_pipe(istore_reg_mem);
8143 %}
8144 
8145 // Store Pointer
8146 instruct storeimmP0(immP0 zero, memory mem)
8147 %{
8148   match(Set mem (StoreP mem zero));
8149   predicate(!needs_releasing_store(n));
8150 
8151   ins_cost(INSN_COST);
8152   format %{ "str zr, $mem\t# ptr" %}
8153 
8154   ins_encode(aarch64_enc_str0(mem));
8155 
8156   ins_pipe(istore_mem);
8157 %}
8158 
8159 // Store Compressed Pointer
8160 instruct storeN(iRegN src, memory mem)
8161 %{
8162   match(Set mem (StoreN mem src));
8163   predicate(!needs_releasing_store(n));
8164 
8165   ins_cost(INSN_COST);
8166   format %{ "strw  $src, $mem\t# compressed ptr" %}
8167 
8168   ins_encode(aarch64_enc_strw(src, mem));
8169 
8170   ins_pipe(istore_reg_mem);
8171 %}
8172 
8173 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
8174 %{
8175   match(Set mem (StoreN mem zero));
8176   predicate(Universe::narrow_oop_base() == NULL &&
8177             Universe::narrow_klass_base() == NULL &&
8178             (!needs_releasing_store(n)));
8179 
8180   ins_cost(INSN_COST);
8181   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
8182 
8183   ins_encode(aarch64_enc_strw(heapbase, mem));
8184 
8185   ins_pipe(istore_reg_mem);
8186 %}
8187 
8188 // Store Float
8189 instruct storeF(vRegF src, memory mem)
8190 %{
8191   match(Set mem (StoreF mem src));
8192   predicate(!needs_releasing_store(n));
8193 
8194   ins_cost(INSN_COST);
8195   format %{ "strs  $src, $mem\t# float" %}
8196 
8197   ins_encode( aarch64_enc_strs(src, mem) );
8198 
8199   ins_pipe(pipe_class_memory);
8200 %}
8201 
8202 // TODO
8203 // implement storeImmF0 and storeFImmPacked
8204 
8205 // Store Double
8206 instruct storeD(vRegD src, memory mem)
8207 %{
8208   match(Set mem (StoreD mem src));
8209   predicate(!needs_releasing_store(n));
8210 
8211   ins_cost(INSN_COST);
8212   format %{ "strd  $src, $mem\t# double" %}
8213 
8214   ins_encode( aarch64_enc_strd(src, mem) );
8215 
8216   ins_pipe(pipe_class_memory);
8217 %}
8218 
// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // match first, predicate second - consistent with every other store rule
  // in this section.
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8232 
8233 // TODO
8234 // implement storeImmD0 and storeDImmPacked
8235 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

// Prefetch-for-write into L1 ahead of an allocation.
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8249 
8250 //  ---------------- volatile loads and stores ----------------
8251 
8252 // Load Byte (8 bit signed)
8253 instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8254 %{
8255   match(Set dst (LoadB mem));
8256 
8257   ins_cost(VOLATILE_REF_COST);
8258   format %{ "ldarsb  $dst, $mem\t# byte" %}
8259 
8260   ins_encode(aarch64_enc_ldarsb(dst, mem));
8261 
8262   ins_pipe(pipe_serial);
8263 %}
8264 
8265 // Load Byte (8 bit signed) into long
8266 instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8267 %{
8268   match(Set dst (ConvI2L (LoadB mem)));
8269 
8270   ins_cost(VOLATILE_REF_COST);
8271   format %{ "ldarsb  $dst, $mem\t# byte" %}
8272 
8273   ins_encode(aarch64_enc_ldarsb(dst, mem));
8274 
8275   ins_pipe(pipe_serial);
8276 %}
8277 
8278 // Load Byte (8 bit unsigned)
8279 instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8280 %{
8281   match(Set dst (LoadUB mem));
8282 
8283   ins_cost(VOLATILE_REF_COST);
8284   format %{ "ldarb  $dst, $mem\t# byte" %}
8285 
8286   ins_encode(aarch64_enc_ldarb(dst, mem));
8287 
8288   ins_pipe(pipe_serial);
8289 %}
8290 
8291 // Load Byte (8 bit unsigned) into long
8292 instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8293 %{
8294   match(Set dst (ConvI2L (LoadUB mem)));
8295 
8296   ins_cost(VOLATILE_REF_COST);
8297   format %{ "ldarb  $dst, $mem\t# byte" %}
8298 
8299   ins_encode(aarch64_enc_ldarb(dst, mem));
8300 
8301   ins_pipe(pipe_serial);
8302 %}
8303 
8304 // Load Short (16 bit signed)
8305 instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8306 %{
8307   match(Set dst (LoadS mem));
8308 
8309   ins_cost(VOLATILE_REF_COST);
8310   format %{ "ldarshw  $dst, $mem\t# short" %}
8311 
8312   ins_encode(aarch64_enc_ldarshw(dst, mem));
8313 
8314   ins_pipe(pipe_serial);
8315 %}
8316 
8317 instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8318 %{
8319   match(Set dst (LoadUS mem));
8320 
8321   ins_cost(VOLATILE_REF_COST);
8322   format %{ "ldarhw  $dst, $mem\t# short" %}
8323 
8324   ins_encode(aarch64_enc_ldarhw(dst, mem));
8325 
8326   ins_pipe(pipe_serial);
8327 %}
8328 
8329 // Load Short/Char (16 bit unsigned) into long
8330 instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8331 %{
8332   match(Set dst (ConvI2L (LoadUS mem)));
8333 
8334   ins_cost(VOLATILE_REF_COST);
8335   format %{ "ldarh  $dst, $mem\t# short" %}
8336 
8337   ins_encode(aarch64_enc_ldarh(dst, mem));
8338 
8339   ins_pipe(pipe_serial);
8340 %}
8341 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed format string: the encoding emits the sign-extending ldarsh,
  // not the zero-extending ldarh shown previously.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8354 
8355 // Load Integer (32 bit signed)
8356 instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
8357 %{
8358   match(Set dst (LoadI mem));
8359 
8360   ins_cost(VOLATILE_REF_COST);
8361   format %{ "ldarw  $dst, $mem\t# int" %}
8362 
8363   ins_encode(aarch64_enc_ldarw(dst, mem));
8364 
8365   ins_pipe(pipe_serial);
8366 %}
8367 
8368 // Load Integer (32 bit unsigned) into long
8369 instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
8370 %{
8371   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
8372 
8373   ins_cost(VOLATILE_REF_COST);
8374   format %{ "ldarw  $dst, $mem\t# int" %}
8375 
8376   ins_encode(aarch64_enc_ldarw(dst, mem));
8377 
8378   ins_pipe(pipe_serial);
8379 %}
8380 
8381 // Load Long (64 bit signed)
8382 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
8383 %{
8384   match(Set dst (LoadL mem));
8385 
8386   ins_cost(VOLATILE_REF_COST);
8387   format %{ "ldar  $dst, $mem\t# int" %}
8388 
8389   ins_encode(aarch64_enc_ldar(dst, mem));
8390 
8391   ins_pipe(pipe_serial);
8392 %}
8393 
8394 // Load Pointer
8395 instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
8396 %{
8397   match(Set dst (LoadP mem));
8398 
8399   ins_cost(VOLATILE_REF_COST);
8400   format %{ "ldar  $dst, $mem\t# ptr" %}
8401 
8402   ins_encode(aarch64_enc_ldar(dst, mem));
8403 
8404   ins_pipe(pipe_serial);
8405 %}
8406 
8407 // Load Compressed Pointer
8408 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
8409 %{
8410   match(Set dst (LoadN mem));
8411 
8412   ins_cost(VOLATILE_REF_COST);
8413   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
8414 
8415   ins_encode(aarch64_enc_ldarw(dst, mem));
8416 
8417   ins_pipe(pipe_serial);
8418 %}
8419 
8420 // Load Float
8421 instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
8422 %{
8423   match(Set dst (LoadF mem));
8424 
8425   ins_cost(VOLATILE_REF_COST);
8426   format %{ "ldars  $dst, $mem\t# float" %}
8427 
8428   ins_encode( aarch64_enc_fldars(dst, mem) );
8429 
8430   ins_pipe(pipe_serial);
8431 %}
8432 
8433 // Load Double
8434 instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
8435 %{
8436   match(Set dst (LoadD mem));
8437 
8438   ins_cost(VOLATILE_REF_COST);
8439   format %{ "ldard  $dst, $mem\t# double" %}
8440 
8441   ins_encode( aarch64_enc_fldard(dst, mem) );
8442 
8443   ins_pipe(pipe_serial);
8444 %}
8445 
8446 // Store Byte
8447 instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8448 %{
8449   match(Set mem (StoreB mem src));
8450 
8451   ins_cost(VOLATILE_REF_COST);
8452   format %{ "stlrb  $src, $mem\t# byte" %}
8453 
8454   ins_encode(aarch64_enc_stlrb(src, mem));
8455 
8456   ins_pipe(pipe_class_memory);
8457 %}
8458 
8459 // Store Char/Short
8460 instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8461 %{
8462   match(Set mem (StoreC mem src));
8463 
8464   ins_cost(VOLATILE_REF_COST);
8465   format %{ "stlrh  $src, $mem\t# short" %}
8466 
8467   ins_encode(aarch64_enc_stlrh(src, mem));
8468 
8469   ins_pipe(pipe_class_memory);
8470 %}
8471 
8472 // Store Integer
8473 
8474 instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
8475 %{
8476   match(Set mem(StoreI mem src));
8477 
8478   ins_cost(VOLATILE_REF_COST);
8479   format %{ "stlrw  $src, $mem\t# int" %}
8480 
8481   ins_encode(aarch64_enc_stlrw(src, mem));
8482 
8483   ins_pipe(pipe_class_memory);
8484 %}
8485 
8486 // Store Long (64 bit signed)
8487 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
8488 %{
8489   match(Set mem (StoreL mem src));
8490 
8491   ins_cost(VOLATILE_REF_COST);
8492   format %{ "stlr  $src, $mem\t# int" %}
8493 
8494   ins_encode(aarch64_enc_stlr(src, mem));
8495 
8496   ins_pipe(pipe_class_memory);
8497 %}
8498 
8499 // Store Pointer
8500 instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
8501 %{
8502   match(Set mem (StoreP mem src));
8503 
8504   ins_cost(VOLATILE_REF_COST);
8505   format %{ "stlr  $src, $mem\t# ptr" %}
8506 
8507   ins_encode(aarch64_enc_stlr(src, mem));
8508 
8509   ins_pipe(pipe_class_memory);
8510 %}
8511 
8512 // Store Compressed Pointer
8513 instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
8514 %{
8515   match(Set mem (StoreN mem src));
8516 
8517   ins_cost(VOLATILE_REF_COST);
8518   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
8519 
8520   ins_encode(aarch64_enc_stlrw(src, mem));
8521 
8522   ins_pipe(pipe_class_memory);
8523 %}
8524 
8525 // Store Float
8526 instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
8527 %{
8528   match(Set mem (StoreF mem src));
8529 
8530   ins_cost(VOLATILE_REF_COST);
8531   format %{ "stlrs  $src, $mem\t# float" %}
8532 
8533   ins_encode( aarch64_enc_fstlrs(src, mem) );
8534 
8535   ins_pipe(pipe_class_memory);
8536 %}
8537 
8538 // TODO
8539 // implement storeImmF0 and storeFImmPacked
8540 
8541 // Store Double
8542 instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
8543 %{
8544   match(Set mem (StoreD mem src));
8545 
8546   ins_cost(VOLATILE_REF_COST);
8547   format %{ "stlrd  $src, $mem\t# double" %}
8548 
8549   ins_encode( aarch64_enc_fstlrd(src, mem) );
8550 
8551   ins_pipe(pipe_class_memory);
8552 %}
8553 
8554 //  ---------------- end of volatile loads and stores ----------------
8555 
8556 // ============================================================================
8557 // BSWAP Instructions
8558 
8559 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
8560   match(Set dst (ReverseBytesI src));
8561 
8562   ins_cost(INSN_COST);
8563   format %{ "revw  $dst, $src" %}
8564 
8565   ins_encode %{
8566     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
8567   %}
8568 
8569   ins_pipe(ialu_reg);
8570 %}
8571 
8572 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
8573   match(Set dst (ReverseBytesL src));
8574 
8575   ins_cost(INSN_COST);
8576   format %{ "rev  $dst, $src" %}
8577 
8578   ins_encode %{
8579     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
8580   %}
8581 
8582   ins_pipe(ialu_reg);
8583 %}
8584 
8585 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
8586   match(Set dst (ReverseBytesUS src));
8587 
8588   ins_cost(INSN_COST);
8589   format %{ "rev16w  $dst, $src" %}
8590 
8591   ins_encode %{
8592     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8593   %}
8594 
8595   ins_pipe(ialu_reg);
8596 %}
8597 
8598 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
8599   match(Set dst (ReverseBytesS src));
8600 
8601   ins_cost(INSN_COST);
8602   format %{ "rev16w  $dst, $src\n\t"
8603             "sbfmw $dst, $dst, #0, #15" %}
8604 
8605   ins_encode %{
8606     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8607     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
8608   %}
8609 
8610   ins_pipe(ialu_reg);
8611 %}
8612 
// ============================================================================
// Zero Count Instructions

// Integer.numberOfLeadingZeros: single CLZW.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long.numberOfLeadingZeros: single CLZ (the Java result is an int,
// hence the iRegINoSp destination).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Integer.numberOfTrailingZeros: AArch64 has no count-trailing-zeros
// instruction, so reverse the bits (RBITW) and count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long.numberOfTrailingZeros: RBIT + CLZ, as above but 64-bit.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8667 
//---------- Population Count Instructions -------------------------------------
//
// There is no integer popcount instruction on AArch64: the value is moved
// into an FP/SIMD register, CNT counts the set bits in each byte lane, and
// ADDV sums the lanes into a single byte, which is moved back to a GPR.

// Integer.bitCount(int).
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src in place to clear its upper 32 bits.
    // The 32-bit int value is preserved, but an input register is modified
    // without an effect() declaration -- confirm the allocator never keeps
    // another live (64-bit) value in this register.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Integer.bitCount of a loaded int: LDRS loads the 32 bits directly into
// the vector register, so no GPR zero-extension step is needed.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Long.bitCount of a loaded long: LDRD loads straight into the vector
// register, as in popCountI_mem above.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8757 
// ============================================================================
// MemBar Instruction

// LoadFence: orders earlier loads before later loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire that the predicate has proven redundant (the preceding
// access already provides the ordering); emits only a block comment.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// MemBarAcquire: LoadLoad|LoadStore barrier.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock: always elided on AArch64; emits only a comment.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders earlier loads and stores before later stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease that the predicate has proven redundant; comment only.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease: LoadStore|StoreStore barrier.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarStoreStore: store-store barrier only.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock: always elided on AArch64; emits only a comment.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile that the predicate has proven redundant; comment only.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile: full StoreLoad barrier. The inflated cost strongly
// discourages the matcher from choosing this over an elided variant.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8905 
// ============================================================================
// Cast/Convert Instructions

// CastX2P: reinterpret a long as a pointer. Plain register move,
// elided entirely when the allocator picks the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// CastP2X: reinterpret a pointer as a long. Plain register move,
// elided when dst == src.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    // movw keeps only the low 32 bits of the pointer (zero-extended),
    // which is all the alignment mask needs.
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8951 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
//
// Only valid when the compressed oop encoding uses no shift (the raw
// narrow-oop bits already are the low 32 address bits).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format string: it previously read "mov dst, $src" -- the
  // missing '$' left the operand unsubstituted in PrintOptoAssembly
  // output, and the emitted instruction is movw, not mov.
  format %{ "movw  $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8967 
8968 
// Convert oop pointer into compressed form

// EncodeP for oops that may be null. Kills flags -- note the not-null
// variant below does not, so presumably the null handling inside
// encode_heap_oop is what clobbers them (NOTE(review): confirm in
// macroAssembler_aarch64).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// EncodeP for oops statically known to be non-null.
// NOTE(review): declares a cr operand but no effect(KILL cr) --
// verify encode_heap_oop_not_null really leaves flags untouched.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// DecodeN for narrow oops that may be null (and are not constants).
// NOTE(review): cr operand declared without an effect() -- verify.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// DecodeN for narrow oops statically known to be non-null or constant.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (always non-null); the macro
// assembler provides a distinct in-place form for dst == src.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}

// CheckCastPP is a type-system-only node: zero size, no code emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP is likewise type-system-only: zero size, no code emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII is likewise type-system-only: zero size, zero cost.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9091 
9092 // ============================================================================
9093 // Atomic operation instructions
9094 //
9095 // Intel and SPARC both implement Ideal Node LoadPLocked and
9096 // Store{PIL}Conditional instructions using a normal load for the
9097 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9098 //
9099 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9100 // pair to lock object allocations from Eden space when not using
9101 // TLABs.
9102 //
9103 // There does not appear to be a Load{IL}Locked Ideal Node and the
9104 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9105 // and to use StoreIConditional only for 32-bit and StoreLConditional
9106 // only for 64-bit.
9107 //
9108 // We implement LoadPLocked and StorePLocked instructions using,
9109 // respectively the AArch64 hw load-exclusive and store-conditional
9110 // instructions. Whereas we must implement each of
9111 // Store{IL}Conditional using a CAS which employs a pair of
9112 // instructions comprising a load-exclusive followed by a
9113 // store-conditional.
9114 
9115 
9116 // Locked-load (linked load) of the current heap-top
9117 // used when updating the eden heap top
9118 // implemented using ldaxr on AArch64
9119 
// Load-exclusive-acquire of the heap top; pairs with storePConditional
// below for lock-free eden allocation.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9132 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// n.b. oldval is not referenced by the encoding: store-exclusive
// succeeds or fails based on the exclusive monitor armed by the
// preceding loadPLocked, not on a value comparison.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  // "\n\t" added between the two format lines; without it adlc
  // concatenates the adjacent string literals and the debug output
  // runs the two instructions together on one line.
  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9157 
9158 
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  // "\n\t" added so the two format lines are not concatenated into a
  // single line in PrintOptoAssembly output.
  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9178 
// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  // "\n\t" added so the two format lines are not concatenated into a
  // single line in PrintOptoAssembly output.
  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval\n\t"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9197 
// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate

// In all four rules below the format strings now end the first line
// with "\n\t" (previously the adjacent literals concatenated and the
// two instructions printed on one line), and the 1-space indentation
// of format/ins_encode has been normalized to two spaces.

// CAS int: $res <- 1 on success, 0 on failure; flags killed.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS long.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS pointer.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS narrow oop (32-bit compare-exchange).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9275 
// alternative CompareAndSwapX when we are eliding barriers

// These acquire forms are selected (by the predicate) when the
// surrounding barriers can be elided; the load-exclusive inside the
// cmpxchg then carries the acquire semantics itself, hence the lower
// cost. Format strings gain "\n\t" separators and normalized
// indentation, as in the plain CAS rules above.

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9353 
9354 
// Atomic exchange: $prev <- old value at [$mem], [$mem] <- $newv.
// Ordering semantics are those of MacroAssembler::atomic_xchgw
// (not visible here).
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, 64-bit.
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow oop (32-bit).
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a pointer (64-bit).
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9390 
9391 
// Atomic fetch-and-add, long, register increment:
// $newval <- old value at [$mem], [$mem] <- old + $incr.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above, but the fetched value is unused (predicate-selected);
// noreg tells atomic_add to discard the old value. Slightly cheaper
// cost so this rule wins when applicable.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, long, immediate increment (add/sub-range constant).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int register increment, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Fetch-and-add, int, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9475 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  // "\n\t" separators added (the adjacent literals previously ran
  // together in debug output); the commented-out duplicate format
  // line has been removed.
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // dst = (src1 != src2) ? 1 : 0, then negated when src1 < src2,
    // yielding the canonical -1 / 0 / +1.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9498 
// As cmpL3_reg_reg but with an add/sub-range immediate second operand;
// a negative constant is folded into an ADDS of its negation.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  // "\n\t" separators added so the format lines print separately.
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // immLAddSub constrains the constant to the add/sub immediate
    // range, so the 32-bit truncation and the negation below are safe.
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9523 
9524 // ============================================================================
9525 // Conditional Move Instructions
9526 
9527 // n.b. we have identical rules for both a signed compare op (cmpOp)
9528 // and an unsigned compare op (cmpOpU). it would be nice if we could
9529 // define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
9535 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9536 
// CMoveI, signed compare: note the operand order in the encoding --
// CSEL selects its first source when the condition holds, so
// $dst = $cmp ? $src2 : $src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveI, unsigned compare flavour (see the comment block above for
// why signed and unsigned need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// CMoveI with zero as the first (false) value: use zr directly.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-first case.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveI with zero as the second (true) value.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned flavour of the zero-second case.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9641 
9642 // special case for creating a boolean 0 or 1
9643 
9644 // n.b. this is selected in preference to the rule above because it
9645 // avoids loading constants 0 and 1 into a source register
9646 
9647 instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
9648   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
9649 
9650   ins_cost(INSN_COST * 2);
9651   format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}
9652 
9653   ins_encode %{
9654     // equivalently
9655     // cset(as_Register($dst$$reg),
9656     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9657     __ csincw(as_Register($dst$$reg),
9658              zr,
9659              zr,
9660              (Assembler::Condition)$cmp$$cmpcode);
9661   %}
9662 
9663   ins_pipe(icond_none);
9664 %}
9665 
9666 instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
9667   match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
9668 
9669   ins_cost(INSN_COST * 2);
9670   format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}
9671 
9672   ins_encode %{
9673     // equivalently
9674     // cset(as_Register($dst$$reg),
9675     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9676     __ csincw(as_Register($dst$$reg),
9677              zr,
9678              zr,
9679              (Assembler::Condition)$cmp$$cmpcode);
9680   %}
9681 
9682   ins_pipe(icond_none);
9683 %}
9684 
// Conditional move of long; 64-bit csel, operands passed in
// (src2, src1) order.  Signed-compare flags.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare twin of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Zero on the right: use zr instead of materialising constant 0.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare twin of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left of the value pair.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare twin of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9782 
// Conditional move of pointer; 64-bit csel, operands passed in
// (src2, src1) order.  Signed-compare flags.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare twin of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Null (zero) pointer on the right: use zr.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare twin of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null (zero) pointer on the left.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare twin of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9880 
// Conditional move of compressed (narrow) pointer; 32-bit cselw,
// operands passed in (src2, src1) order.  Signed-compare flags.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9896 
// Conditional move of compressed (narrow) pointer; 32-bit cselw,
// operands passed in (src2, src1) order.  This rule takes flags from
// an unsigned compare (cmpOpU/rFlagsRegU); the format previously said
// "# signed", which mislabelled the disassembly output.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9912 
// special cases where one arg is zero

// Null (zero) compressed pointer on the right: use zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare twin of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null (zero) compressed pointer on the left.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare twin of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9978 
// Conditional move of float via fcsels; operands passed in
// (src2, src1) order.  Signed-compare flags.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9996 
// Unsigned-compare twin of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10014 
// Conditional move of double via fcseld; operands passed in
// (src2, src1) order.  Signed-compare flags.  The format previously
// said "cmove float" although this is the CMoveD/double rule.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10032 
// Unsigned-compare twin of cmovD_reg.  The format previously said
// "cmove float" although this is the CMoveD/double rule.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10050 
// ============================================================================
// Arithmetic Instructions
//

// Integer Addition

// TODO
// these currently employ operations which do not set CR and hence are
// not flagged as killing CR but we would like to isolate the cases
// where we want to set flags from those where we don't. need to work
// out how to do that.

// Int add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Int add of a long narrowed by ConvL2I, plus immediate; the narrow
// happens for free because addw only reads the low 32 bits.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10105 
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + sign-extended int offset, folded into one add with sxtw.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + (long << scale), folded into a scaled lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer + ((long)int << scale), folded into a sign-extending
// scaled lea.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (long)int << scale as a single sbfiz (sign-extend + shift).
// NOTE(review): the field width is capped at 32 via MIN, presumably
// because only 32 significant bits come from the int source — confirm
// against sbfiz operand rules before changing.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10181 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10215 
// Long Immediate Addition. No constant pool entries required.
// Long add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10230 
// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10278 
// Long Immediate Subtraction. No constant pool entries required.
// Long sub, register - add/sub-encodable immediate.  The format
// string previously read "sub$dst" (missing separator after the
// mnemonic), which garbled the disassembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10293 
// Integer Negation (special case for sub)

// Matches (SubI 0 src); negw expands to subw dst, wzr, src.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10309 
10310 // Long Negation
10311 
// Matches (SubL 0 src).  The source of a long negate is a long value,
// so the operand class must be iRegL; the previous iRegIorL2I (an
// int-valued operand class that also matches ConvL2I subtrees) did
// not describe the long input of SubL correctly.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10325 
// Integer Multiply

instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// long = int * int via a single smull, avoiding separate
// sign-extensions of the two int inputs.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10357 
// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10374 
// High 64 bits of a signed 64x64->128 multiply (smulh).  The format
// previously contained a stray ", " before the comment tab.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10390 
10391 // Combined Integer Multiply & Add/Sub
10392 
// Int multiply-add: dst = src3 + src1 * src2, as a single maddw.
// The format previously showed "madd" although the 32-bit maddw is
// what is actually emitted.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10408 
// Int multiply-subtract: dst = src3 - src1 * src2, as a single msubw.
// The format previously showed "msub" although the 32-bit msubw is
// what is actually emitted.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10424 
// Combined Long Multiply & Add/Sub

// Long multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Long multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10458 
// Integer Divide

instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (x >> 31) >>> 31 extracts the sign bit of an int; a single
// logical shift right by 31 of the original value is equivalent.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + sign-bit(src): the rounding step of a signed divide by a
// power of two, folded into one shifted addw.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10494 
// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// (x >> 63) >>> 63 extracts the sign bit of a long; a single
// logical shift right by 63 of the original value is equivalent.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10516 
// src + sign-bit(src) for long: the rounding step of a signed divide
// by a power of two, folded into one shifted add.  The format
// previously omitted the "LSR" shown by the int twin (div2Round),
// even though the encoding applies Assembler::LSR by 63.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10530 
10531 // Integer Remainder
10532 
// Int remainder as sdivw + msubw (dst = src1 - (src1/src2)*src2).
// The format previously read "msubw($dst" — a stray "(" with no
// closing paren — garbling the disassembly output.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10543 
10544 // Long Remainder
10545 
// Long remainder as sdiv + msub (dst = src1 - (src1/src2)*src2).
// The format previously read "msub($dst" — a stray "(" with no
// closing paren — and used "\n" where the file's convention is "\n\t".
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10556 
// Integer Shifts

// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// The shift count is masked to 0..31, matching Java int shift
// semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10590 
// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// The shift count is masked to 0..31, matching Java int shift
// semantics.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10622 
10623 // Shift Right Arithmetic Register
10624 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10625   match(Set dst (RShiftI src1 src2));
10626 
10627   ins_cost(INSN_COST * 2);
10628   format %{ "asrvw  $dst, $src1, $src2" %}
10629 
10630   ins_encode %{
10631     __ asrvw(as_Register($dst$$reg),
10632              as_Register($src1$$reg),
10633              as_Register($src2$$reg));
10634   %}
10635 
10636   ins_pipe(ialu_reg_reg_vshift);
10637 %}
10638 
10639 // Shift Right Arithmetic Immediate
10640 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10641   match(Set dst (RShiftI src1 src2));
10642 
10643   ins_cost(INSN_COST);
10644   format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
10645 
10646   ins_encode %{
10647     __ asrw(as_Register($dst$$reg),
10648             as_Register($src1$$reg),
10649             $src2$$constant & 0x1f);
10650   %}
10651 
10652   ins_pipe(ialu_reg_shift);
10653 %}
10654 
// Combined Int Mask and Right Shift (using UBFM)
// TODO

// Long Shifts
//
// 64-bit analogues of the integer shift rules above; immediate counts are
// masked to the low 6 bits (& 0x3f) for 64-bit shift-count semantics.

// Shift Left Register
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Matches the pointer being cast to an integer (CastP2X) and shifted,
// so no separate move is emitted for the cast.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10771 
// BEGIN This section of the file is automatically generated. Do not edit --------------

// NOTE(review): the rules below fold an XOR-with-minus-one (bitwise NOT)
// into the inverted-operand logical instructions: EON/EONW (XOR-NOT,
// against zr for plain NOT), BIC/BICW (AND-NOT) and ORN/ORNW (OR-NOT).
// Comments only — the generated code itself is unmodified.

instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10908 
// NOTE(review, generated section — comments only): these rules fold
// (logical-op src1, NOT(shift(src2, src3))) into a single BIC/ORN/EON
// with a shifted second operand. Shift immediates are masked to 0x1f
// (32-bit forms) or 0x3f (64-bit forms).

instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11232 
// NOTE(review, generated section — comments only): these rules fold a
// shifted second operand directly into AND/XOR/OR (andw/andr, eorw/eor,
// orrw/orr with a shift kind and masked immediate count), saving the
// separate shift instruction.

instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11574 
// ---- ADD with shifted-register operand -----------------------------------
// Fold an immediate-count shift of src2 into the add/addw shifted-register
// form; shift count masked to the datum width (0x1f / 0x3f).

// int ADD with logical-right-shifted operand: dst = src1 + (src2 >>> src3)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long ADD with logical-right-shifted operand: dst = src1 + (src2 >>> src3)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int ADD with arithmetic-right-shifted operand: dst = src1 + (src2 >> src3)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long ADD with arithmetic-right-shifted operand: dst = src1 + (src2 >> src3)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int ADD with left-shifted operand: dst = src1 + (src2 << src3)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long ADD with left-shifted operand: dst = src1 + (src2 << src3)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11688 
// ---- SUB with shifted-register operand -----------------------------------
// Fold an immediate-count shift of src2 into the sub/subw shifted-register
// form; shift count masked to the datum width (0x1f / 0x3f).

// int SUB with logical-right-shifted operand: dst = src1 - (src2 >>> src3)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long SUB with logical-right-shifted operand: dst = src1 - (src2 >>> src3)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int SUB with arithmetic-right-shifted operand: dst = src1 - (src2 >> src3)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long SUB with arithmetic-right-shifted operand: dst = src1 - (src2 >> src3)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int SUB with left-shifted operand: dst = src1 - (src2 << src3)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long SUB with left-shifted operand: dst = src1 - (src2 << src3)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11802 
11803 
11804 
11805 // Shift Left followed by Shift Right.
11806 // This idiom is used by the compiler for the i2b bytecode etc.
// long (x << lshift) >> rshift becomes a single signed bit-field move.
// r = (rshift - lshift) & 63 positions the field; s = 63 - lshift selects
// its width, so sbfm extracts and sign-extends exactly the surviving bits.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: (x << lshift) >> rshift via sbfmw, counts <= 31.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// long (x << lshift) >>> rshift: unsigned bit-field move (zero-extends).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL: (x << lshift) >>> rshift via ubfmw, counts <= 31.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11896 // Bitfield extract with shift & mask
11897 
// int (src >>> rshift) & mask becomes a single ubfxw.  The immI_bitmask
// operand guarantees mask+1 is a power of two, so exact_log2(mask+1) gives
// the field width.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// long (src >>> rshift) & mask becomes a single ubfx; same width derivation.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The masked int result is non-negative, so the 64-bit ubfx's implicit
// zero-extension gives the same value ConvI2L would.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11946 
11947 // Rotations
11948 
// long (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 64
// (checked by the predicate) is a single extr (double-register extract).
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11963 
// int (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 32
// (checked by the predicate) is a single extrw.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Print the w-form mnemonic to match the extrw actually emitted
  // (consistent with orrw/addw/subw in the 32-bit rules above).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11978 
// long (src1 << lshift) + (src2 >>> rshift) with lshift + rshift == 64:
// the shifted halves cannot overlap, so ADD equals OR and extr applies.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11993 
// int (src1 << lshift) + (src2 >>> rshift) with lshift + rshift == 32:
// the shifted halves cannot overlap, so ADD equals OR and extrw applies.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Print the w-form mnemonic to match the extrw actually emitted
  // (consistent with orrw/addw/subw in the 32-bit rules above).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12008 
12009 
12010 // rol expander
12011 
// 64-bit rotate-left expander.  AArch64 has no rol: rol(x, s) is emitted
// as rorv(x, -s) since rotate counts are taken mod 64; rscratch1 holds
// the negated count.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander

// 32-bit rotate-left expander: rol(x, s) == rorvw(x, -s), counts mod 32.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12041 
// Match the long rotate-left idiom (x << s) | (x >>> (64 - s)).
// Shift counts are taken mod 64, so (64 - s) and (0 - s) are equivalent;
// the c_64 and c0 variants below cover both spellings of the idiom.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written as (x << s) | (x >>> (0 - s)).
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12059 
// Match the int rotate-left idiom (x << s) | (x >>> (32 - s)).
// FIX: this rule matches an int pattern (OrI/LShiftI/URShiftI) but was
// declared with the long register classes and expanded to the 64-bit
// rolL_rReg, which rotates across all 64 bits and yields a wrong result
// for a 32-bit rotate.  Use the int register classes and the 32-bit
// rolI_rReg expander.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12068 
// Match the int rotate-left idiom (x << s) | (x >>> (0 - s)).
// FIX: as with rolI_rReg_Var_C_32, the int pattern was wired to the
// long register classes and the 64-bit rolL_rReg expander; use the int
// register classes and rolI_rReg so a 32-bit rotate is emitted.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12077 
12078 // ror expander
12079 
// 64-bit rotate-right expander: maps directly onto rorv.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit rotate-right expander: maps directly onto rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12107 
// Match the long rotate-right idiom (x >>> s) | (x << (64 - s)).
// Shift counts are mod 64, so the (64 - s) and (0 - s) forms are the same.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written as (x >>> s) | (x << (0 - s)).
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12125 
// Match the int rotate-right idiom (x >>> s) | (x << (32 - s)).
// FIX: this rule matches an int pattern (OrI/URShiftI/LShiftI) but was
// declared with the long register classes and expanded to the 64-bit
// rorL_rReg, which rotates across all 64 bits and yields a wrong result
// for a 32-bit rotate.  Use the int register classes and the 32-bit
// rorI_rReg expander.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12134 
// Match the int rotate-right idiom (x >>> s) | (x << (0 - s)).
// FIX: as with rorI_rReg_Var_C_32, the int pattern was wired to the
// long register classes and the 64-bit rorL_rReg expander; use the int
// register classes and rorI_rReg so a 32-bit rotate is emitted.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12143 
12144 // Add/subtract (extended)
12145 
// long +/- sign-extended int: fold the ConvI2L into the add/sub
// extended-register form (sxtw) instead of a separate widening move.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// long minus sign-extended int, same folding as AddExtI.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12171 
12172 
// ---- ADD with extended-register operand ----------------------------------
// The (x << k) >> k sign/zero-extension idiom on the second operand is
// folded into add's extended-register form; the immI_16/24/32/48/56 shift
// operands pin k so the extracted field is 16/8/32 bits as appropriate.

// int add of a short: (src2 << 16) >> 16 folded as sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int add of a byte: (src2 << 24) >> 24 folded as sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int add of an unsigned byte: (src2 << 24) >>> 24 folded as uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of a short: (src2 << 48) >> 48 folded as sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of an int: (src2 << 32) >> 32 folded as sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of a byte: (src2 << 56) >> 56 folded as sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of an unsigned byte: (src2 << 56) >>> 56 folded as uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12263 
12264 
// ---- ADD with mask-based zero-extension ----------------------------------
// src2 & 0xff / 0xffff / 0xffffffff is the same value as uxtb/uxth/uxtw
// of src2, so the AND folds into the add extended-register form.

// int add of (src2 & 0xff) as addw ... uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int add of (src2 & 0xffff) as addw ... uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of (src2 & 0xff) as add ... uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of (src2 & 0xffff) as add ... uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of (src2 & 0xffffffff) as add ... uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12329 
// ---- SUB with mask-based zero-extension ----------------------------------
// Mirror of the AddExt*_and rules: fold src2 & 0xff / 0xffff / 0xffffffff
// into sub's extended-register form (uxtb/uxth/uxtw).

// int sub of (src2 & 0xff) as subw ... uxtb.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int sub of (src2 & 0xffff) as subw ... uxth.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long sub of (src2 & 0xff) as sub ... uxtb.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long sub of (src2 & 0xffff) as sub ... uxth.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long sub of (src2 & 0xffffffff) as sub ... uxtw.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12394 
12395 // END This section of the file is automatically generated. Do not edit --------------
12396 
12397 // ============================================================================
12398 // Floating Point Arithmetic Instructions
12399 
// Single-precision FP add: fadds.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: faddd.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract: fsubs.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: fsubd.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP multiply: fmuls.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: fmuld.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12489 
12490 // We cannot use these fused mul w add/sub ops because they don't
12491 // produce the same result as the equivalent separated ops
12492 // (essentially they don't round the intermediate result). that's a
12493 // shame. Leaving them here in case we can identify cases where it is
12494 // legitimate to use them
12495 
12496 
12497 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12498 //   match(Set dst (AddF (MulF src1 src2) src3));
12499 
12500 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12501 
12502 //   ins_encode %{
12503 //     __ fmadds(as_FloatRegister($dst$$reg),
12504 //              as_FloatRegister($src1$$reg),
12505 //              as_FloatRegister($src2$$reg),
12506 //              as_FloatRegister($src3$$reg));
12507 //   %}
12508 
12509 //   ins_pipe(pipe_class_default);
12510 // %}
12511 
12512 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12513 //   match(Set dst (AddD (MulD src1 src2) src3));
12514 
12515 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12516 
12517 //   ins_encode %{
12518 //     __ fmaddd(as_FloatRegister($dst$$reg),
12519 //              as_FloatRegister($src1$$reg),
12520 //              as_FloatRegister($src2$$reg),
12521 //              as_FloatRegister($src3$$reg));
12522 //   %}
12523 
12524 //   ins_pipe(pipe_class_default);
12525 // %}
12526 
12527 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12528 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12529 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12530 
12531 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12532 
12533 //   ins_encode %{
12534 //     __ fmsubs(as_FloatRegister($dst$$reg),
12535 //               as_FloatRegister($src1$$reg),
12536 //               as_FloatRegister($src2$$reg),
12537 //              as_FloatRegister($src3$$reg));
12538 //   %}
12539 
12540 //   ins_pipe(pipe_class_default);
12541 // %}
12542 
12543 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12544 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12545 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12546 
12547 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12548 
12549 //   ins_encode %{
12550 //     __ fmsubd(as_FloatRegister($dst$$reg),
12551 //               as_FloatRegister($src1$$reg),
12552 //               as_FloatRegister($src2$$reg),
12553 //               as_FloatRegister($src3$$reg));
12554 //   %}
12555 
12556 //   ins_pipe(pipe_class_default);
12557 // %}
12558 
12559 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12560 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12561 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12562 
12563 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12564 
12565 //   ins_encode %{
12566 //     __ fnmadds(as_FloatRegister($dst$$reg),
12567 //                as_FloatRegister($src1$$reg),
12568 //                as_FloatRegister($src2$$reg),
12569 //                as_FloatRegister($src3$$reg));
12570 //   %}
12571 
12572 //   ins_pipe(pipe_class_default);
12573 // %}
12574 
12575 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12576 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12577 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12578 
12579 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12580 
12581 //   ins_encode %{
12582 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12583 //                as_FloatRegister($src1$$reg),
12584 //                as_FloatRegister($src2$$reg),
12585 //                as_FloatRegister($src3$$reg));
12586 //   %}
12587 
12588 //   ins_pipe(pipe_class_default);
12589 // %}
12590 
12591 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12592 //   match(Set dst (SubF (MulF src1 src2) src3));
12593 
12594 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12595 
12596 //   ins_encode %{
12597 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12598 //                as_FloatRegister($src1$$reg),
12599 //                as_FloatRegister($src2$$reg),
12600 //                as_FloatRegister($src3$$reg));
12601 //   %}
12602 
12603 //   ins_pipe(pipe_class_default);
12604 // %}
12605 
12606 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12607 //   match(Set dst (SubD (MulD src1 src2) src3));
12608 
12609 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12610 
12611 //   ins_encode %{
12612 //   // n.b. insn name should be fnmsubd
12613 //     __ fnmsub(as_FloatRegister($dst$$reg),
12614 //                as_FloatRegister($src1$$reg),
12615 //                as_FloatRegister($src2$$reg),
12616 //                as_FloatRegister($src3$$reg));
12617 //   %}
12618 
12619 //   ins_pipe(pipe_class_default);
12620 // %}
12621 
12622 
// dst = src1 / src2, single precision (fdivs).  Division is costed far
// above the other FP ops to discourage speculative scheduling onto it.
12623 instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
12624   match(Set dst (DivF src1  src2));
12625 
12626   ins_cost(INSN_COST * 18);
12627   format %{ "fdivs   $dst, $src1, $src2" %}
12628 
12629   ins_encode %{
12630     __ fdivs(as_FloatRegister($dst$$reg),
12631              as_FloatRegister($src1$$reg),
12632              as_FloatRegister($src2$$reg));
12633   %}
12634 
12635   ins_pipe(fp_div_s);
12636 %}
12637 
// dst = src1 / src2, double precision (fdivd).
12638 instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
12639   match(Set dst (DivD src1  src2));
12640 
12641   ins_cost(INSN_COST * 32);
12642   format %{ "fdivd   $dst, $src1, $src2" %}
12643 
12644   ins_encode %{
12645     __ fdivd(as_FloatRegister($dst$$reg),
12646              as_FloatRegister($src1$$reg),
12647              as_FloatRegister($src2$$reg));
12648   %}
12649 
12650   ins_pipe(fp_div_d);
12651 %}
12652 
// dst = -src, single precision.
// Fix: the format string said "fneg" while the encoding emits fnegs;
// corrected to match the emitted instruction (cf. negD's "fnegd").
12653 instruct negF_reg_reg(vRegF dst, vRegF src) %{
12654   match(Set dst (NegF src));
12655 
12656   ins_cost(INSN_COST * 3);
12657   format %{ "fnegs   $dst, $src" %}
12658 
12659   ins_encode %{
12660     __ fnegs(as_FloatRegister($dst$$reg),
12661              as_FloatRegister($src$$reg));
12662   %}
12663 
12664   ins_pipe(fp_uop_s);
12665 %}
12666 
// dst = -src, double precision (fnegd).
12667 instruct negD_reg_reg(vRegD dst, vRegD src) %{
12668   match(Set dst (NegD src));
12669 
12670   ins_cost(INSN_COST * 3);
12671   format %{ "fnegd   $dst, $src" %}
12672 
12673   ins_encode %{
12674     __ fnegd(as_FloatRegister($dst$$reg),
12675              as_FloatRegister($src$$reg));
12676   %}
12677 
12678   ins_pipe(fp_uop_d);
12679 %}
12680 
// dst = |src|, single precision (fabss).
12681 instruct absF_reg(vRegF dst, vRegF src) %{
12682   match(Set dst (AbsF src));
12683 
12684   ins_cost(INSN_COST * 3);
12685   format %{ "fabss   $dst, $src" %}
12686   ins_encode %{
12687     __ fabss(as_FloatRegister($dst$$reg),
12688              as_FloatRegister($src$$reg));
12689   %}
12690 
12691   ins_pipe(fp_uop_s);
12692 %}
12693 
// dst = |src|, double precision (fabsd).
12694 instruct absD_reg(vRegD dst, vRegD src) %{
12695   match(Set dst (AbsD src));
12696 
12697   ins_cost(INSN_COST * 3);
12698   format %{ "fabsd   $dst, $src" %}
12699   ins_encode %{
12700     __ fabsd(as_FloatRegister($dst$$reg),
12701              as_FloatRegister($src$$reg));
12702   %}
12703 
12704   ins_pipe(fp_uop_d);
12705 %}
12706 
// dst = sqrt(src), double precision (fsqrtd).
// Fix: this was scheduled on the single-precision divide pipeline
// (fp_div_s); a double-precision fsqrtd belongs on the double pipeline,
// matching divD_reg_reg.  Affects scheduling cost model only, not the
// emitted code.
12707 instruct sqrtD_reg(vRegD dst, vRegD src) %{
12708   match(Set dst (SqrtD src));
12709 
12710   ins_cost(INSN_COST * 50);
12711   format %{ "fsqrtd  $dst, $src" %}
12712   ins_encode %{
12713     __ fsqrtd(as_FloatRegister($dst$$reg),
12714              as_FloatRegister($src$$reg));
12715   %}
12716 
12717   ins_pipe(fp_div_d);
12718 %}
12719 
// Single-precision sqrt.  The ideal graph expresses float sqrt as
// ConvD2F(SqrtD(ConvF2D src)); the whole round trip is matched down to a
// single fsqrts.
// Fix: this was scheduled on the double-precision divide pipeline
// (fp_div_d); fsqrts belongs on the single-precision pipeline, matching
// divF_reg_reg.  Affects scheduling cost model only, not the emitted code.
12720 instruct sqrtF_reg(vRegF dst, vRegF src) %{
12721   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
12722 
12723   ins_cost(INSN_COST * 50);
12724   format %{ "fsqrts  $dst, $src" %}
12725   ins_encode %{
12726     __ fsqrts(as_FloatRegister($dst$$reg),
12727              as_FloatRegister($src$$reg));
12728   %}
12729 
12730   ins_pipe(fp_div_s);
12731 %}
12732 
12733 // ============================================================================
12734 // Logical Instructions
12735 
12736 // Integer Logical Instructions
12737 
12738 // And Instructions
12739 
12740 
// dst = src1 & src2, 32-bit (andw).
// NOTE(review): cr appears in the operand list but no effect(KILL cr) is
// declared and andw does not set flags — presumably vestigial; confirm.
12741 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
12742   match(Set dst (AndI src1 src2));
12743 
12744   format %{ "andw  $dst, $src1, $src2\t# int" %}
12745 
12746   ins_cost(INSN_COST);
12747   ins_encode %{
12748     __ andw(as_Register($dst$$reg),
12749             as_Register($src1$$reg),
12750             as_Register($src2$$reg));
12751   %}
12752 
12753   ins_pipe(ialu_reg_reg);
12754 %}
12755 
// dst = src1 & src2, 32-bit, logical-immediate form (immILog).
// Fix: the format string said "andsw" (flag-setting) but the encoding
// emits the non-flag-setting andw; corrected to match.
12756 instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
12757   match(Set dst (AndI src1 src2));
12758 
12759   format %{ "andw  $dst, $src1, $src2\t# int" %}
12760 
12761   ins_cost(INSN_COST);
12762   ins_encode %{
12763     __ andw(as_Register($dst$$reg),
12764             as_Register($src1$$reg),
12765             (unsigned long)($src2$$constant));
12766   %}
12767 
12768   ins_pipe(ialu_reg_imm);
12769 %}
12770 
12771 // Or Instructions
12772 
// dst = src1 | src2, 32-bit (orrw).
12773 instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
12774   match(Set dst (OrI src1 src2));
12775 
12776   format %{ "orrw  $dst, $src1, $src2\t# int" %}
12777 
12778   ins_cost(INSN_COST);
12779   ins_encode %{
12780     __ orrw(as_Register($dst$$reg),
12781             as_Register($src1$$reg),
12782             as_Register($src2$$reg));
12783   %}
12784 
12785   ins_pipe(ialu_reg_reg);
12786 %}
12787 
// dst = src1 | src2, 32-bit, logical-immediate form.
12788 instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
12789   match(Set dst (OrI src1 src2));
12790 
12791   format %{ "orrw  $dst, $src1, $src2\t# int" %}
12792 
12793   ins_cost(INSN_COST);
12794   ins_encode %{
12795     __ orrw(as_Register($dst$$reg),
12796             as_Register($src1$$reg),
12797             (unsigned long)($src2$$constant));
12798   %}
12799 
12800   ins_pipe(ialu_reg_imm);
12801 %}
12802 
12803 // Xor Instructions
12804 
// dst = src1 ^ src2, 32-bit (eorw).
12805 instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
12806   match(Set dst (XorI src1 src2));
12807 
12808   format %{ "eorw  $dst, $src1, $src2\t# int" %}
12809 
12810   ins_cost(INSN_COST);
12811   ins_encode %{
12812     __ eorw(as_Register($dst$$reg),
12813             as_Register($src1$$reg),
12814             as_Register($src2$$reg));
12815   %}
12816 
12817   ins_pipe(ialu_reg_reg);
12818 %}
12819 
// dst = src1 ^ src2, 32-bit, logical-immediate form.
12820 instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
12821   match(Set dst (XorI src1 src2));
12822 
12823   format %{ "eorw  $dst, $src1, $src2\t# int" %}
12824 
12825   ins_cost(INSN_COST);
12826   ins_encode %{
12827     __ eorw(as_Register($dst$$reg),
12828             as_Register($src1$$reg),
12829             (unsigned long)($src2$$constant));
12830   %}
12831 
12832   ins_pipe(ialu_reg_imm);
12833 %}
12834 
12835 // Long Logical Instructions
12836 // TODO
12837 
// dst = src1 & src2, 64-bit (andr).
// Fix throughout this group: the format comments said "# int" on 64-bit
// (long) operations; corrected to "# long".
// NOTE(review): cr in the two andL operand lists has no declared effect
// and andr does not set flags — presumably vestigial; confirm.
12838 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
12839   match(Set dst (AndL src1 src2));
12840 
12841   format %{ "and  $dst, $src1, $src2\t# long" %}
12842 
12843   ins_cost(INSN_COST);
12844   ins_encode %{
12845     __ andr(as_Register($dst$$reg),
12846             as_Register($src1$$reg),
12847             as_Register($src2$$reg));
12848   %}
12849 
12850   ins_pipe(ialu_reg_reg);
12851 %}
12852 
// dst = src1 & src2, 64-bit, logical-immediate form (immLLog).
12853 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
12854   match(Set dst (AndL src1 src2));
12855 
12856   format %{ "and  $dst, $src1, $src2\t# long" %}
12857 
12858   ins_cost(INSN_COST);
12859   ins_encode %{
12860     __ andr(as_Register($dst$$reg),
12861             as_Register($src1$$reg),
12862             (unsigned long)($src2$$constant));
12863   %}
12864 
12865   ins_pipe(ialu_reg_imm);
12866 %}
12867 
12868 // Or Instructions
12869 
// dst = src1 | src2, 64-bit (orr).
12870 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12871   match(Set dst (OrL src1 src2));
12872 
12873   format %{ "orr  $dst, $src1, $src2\t# long" %}
12874 
12875   ins_cost(INSN_COST);
12876   ins_encode %{
12877     __ orr(as_Register($dst$$reg),
12878            as_Register($src1$$reg),
12879            as_Register($src2$$reg));
12880   %}
12881 
12882   ins_pipe(ialu_reg_reg);
12883 %}
12884 
// dst = src1 | src2, 64-bit, logical-immediate form.
12885 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
12886   match(Set dst (OrL src1 src2));
12887 
12888   format %{ "orr  $dst, $src1, $src2\t# long" %}
12889 
12890   ins_cost(INSN_COST);
12891   ins_encode %{
12892     __ orr(as_Register($dst$$reg),
12893            as_Register($src1$$reg),
12894            (unsigned long)($src2$$constant));
12895   %}
12896 
12897   ins_pipe(ialu_reg_imm);
12898 %}
12899 
12900 // Xor Instructions
12901 
// dst = src1 ^ src2, 64-bit (eor).
12902 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12903   match(Set dst (XorL src1 src2));
12904 
12905   format %{ "eor  $dst, $src1, $src2\t# long" %}
12906 
12907   ins_cost(INSN_COST);
12908   ins_encode %{
12909     __ eor(as_Register($dst$$reg),
12910            as_Register($src1$$reg),
12911            as_Register($src2$$reg));
12912   %}
12913 
12914   ins_pipe(ialu_reg_reg);
12915 %}
12916 
// dst = src1 ^ src2, 64-bit, logical-immediate form.
12917 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
12918   match(Set dst (XorL src1 src2));
12919 
12920   ins_cost(INSN_COST);
12921   format %{ "eor  $dst, $src1, $src2\t# long" %}
12922 
12923   ins_encode %{
12924     __ eor(as_Register($dst$$reg),
12925            as_Register($src1$$reg),
12926            (unsigned long)($src2$$constant));
12927   %}
12928 
12929   ins_pipe(ialu_reg_imm);
12930 %}
12931 
// Sign-extend int to long: sbfm dst, src, #0, #31 (sxtw).
12932 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
12933 %{
12934   match(Set dst (ConvI2L src));
12935 
12936   ins_cost(INSN_COST);
12937   format %{ "sxtw  $dst, $src\t# i2l" %}
12938   ins_encode %{
12939     __ sbfm($dst$$Register, $src$$Register, 0, 31);
12940   %}
12941   ins_pipe(ialu_reg_shift);
12942 %}
12943 
// Zero-extend int to long: (ConvI2L src) & 0xFFFFFFFF folded into one ubfm.
12944 // this pattern occurs in bigmath arithmetic
12945 instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
12946 %{
12947   match(Set dst (AndL (ConvI2L src) mask));
12948 
12949   ins_cost(INSN_COST);
12950   format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
12951   ins_encode %{
12952     __ ubfm($dst$$Register, $src$$Register, 0, 31);
12953   %}
12954 
12955   ins_pipe(ialu_reg_shift);
12956 %}
12957 
// Truncate long to int: movw keeps the low 32 bits and zeroes the rest.
12958 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
12959   match(Set dst (ConvL2I src));
12960 
12961   ins_cost(INSN_COST);
12962   format %{ "movw  $dst, $src \t// l2i" %}
12963 
12964   ins_encode %{
12965     __ movw(as_Register($dst$$reg), as_Register($src$$reg));
12966   %}
12967 
12968   ins_pipe(ialu_reg);
12969 %}
12970 
// Int -> boolean: dst = (src != 0) ? 1 : 0, via cmpw/cset.  Clobbers flags.
12971 instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
12972 %{
12973   match(Set dst (Conv2B src));
12974   effect(KILL cr);
12975 
12976   format %{
12977     "cmpw $src, zr\n\t"
12978     "cset $dst, ne"
12979   %}
12980 
12981   ins_encode %{
12982     __ cmpw(as_Register($src$$reg), zr);
12983     __ cset(as_Register($dst$$reg), Assembler::NE);
12984   %}
12985 
12986   ins_pipe(ialu_reg);
12987 %}
12988 
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0, 64-bit compare.
12989 instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
12990 %{
12991   match(Set dst (Conv2B src));
12992   effect(KILL cr);
12993 
12994   format %{
12995     "cmp  $src, zr\n\t"
12996     "cset $dst, ne"
12997   %}
12998 
12999   ins_encode %{
13000     __ cmp(as_Register($src$$reg), zr);
13001     __ cset(as_Register($dst$$reg), Assembler::NE);
13002   %}
13003 
13004   ins_pipe(ialu_reg);
13005 %}
13006 
// Narrow double to float (fcvtd).
13007 instruct convD2F_reg(vRegF dst, vRegD src) %{
13008   match(Set dst (ConvD2F src));
13009 
13010   ins_cost(INSN_COST * 5);
13011   format %{ "fcvtd  $dst, $src \t// d2f" %}
13012 
13013   ins_encode %{
13014     __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
13015   %}
13016 
13017   ins_pipe(fp_d2f);
13018 %}
13019 
// Widen float to double (fcvts).
13020 instruct convF2D_reg(vRegD dst, vRegF src) %{
13021   match(Set dst (ConvF2D src));
13022 
13023   ins_cost(INSN_COST * 5);
13024   format %{ "fcvts  $dst, $src \t// f2d" %}
13025 
13026   ins_encode %{
13027     __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
13028   %}
13029 
13030   ins_pipe(fp_f2d);
13031 %}
13032 
// Float -> int, round toward zero (fcvtzsw).
13033 instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
13034   match(Set dst (ConvF2I src));
13035 
13036   ins_cost(INSN_COST * 5);
13037   format %{ "fcvtzsw  $dst, $src \t// f2i" %}
13038 
13039   ins_encode %{
13040     __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13041   %}
13042 
13043   ins_pipe(fp_f2i);
13044 %}
13045 
// Float -> long, round toward zero (fcvtzs).
13046 instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
13047   match(Set dst (ConvF2L src));
13048 
13049   ins_cost(INSN_COST * 5);
13050   format %{ "fcvtzs  $dst, $src \t// f2l" %}
13051 
13052   ins_encode %{
13053     __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13054   %}
13055 
13056   ins_pipe(fp_f2l);
13057 %}
13058 
// Signed int -> float (scvtfws).
13059 instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
13060   match(Set dst (ConvI2F src));
13061 
13062   ins_cost(INSN_COST * 5);
13063   format %{ "scvtfws  $dst, $src \t// i2f" %}
13064 
13065   ins_encode %{
13066     __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13067   %}
13068 
13069   ins_pipe(fp_i2f);
13070 %}
13071 
// Signed long -> float (scvtfs).
13072 instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
13073   match(Set dst (ConvL2F src));
13074 
13075   ins_cost(INSN_COST * 5);
13076   format %{ "scvtfs  $dst, $src \t// l2f" %}
13077 
13078   ins_encode %{
13079     __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13080   %}
13081 
13082   ins_pipe(fp_l2f);
13083 %}
13084 
// Double -> int, round toward zero (fcvtzdw).
13085 instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
13086   match(Set dst (ConvD2I src));
13087 
13088   ins_cost(INSN_COST * 5);
13089   format %{ "fcvtzdw  $dst, $src \t// d2i" %}
13090 
13091   ins_encode %{
13092     __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13093   %}
13094 
13095   ins_pipe(fp_d2i);
13096 %}
13097 
// Double -> long, round toward zero (fcvtzd).
13098 instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
13099   match(Set dst (ConvD2L src));
13100 
13101   ins_cost(INSN_COST * 5);
13102   format %{ "fcvtzd  $dst, $src \t// d2l" %}
13103 
13104   ins_encode %{
13105     __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
13106   %}
13107 
13108   ins_pipe(fp_d2l);
13109 %}
13110 
// Signed int -> double (scvtfwd).
13111 instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
13112   match(Set dst (ConvI2D src));
13113 
13114   ins_cost(INSN_COST * 5);
13115   format %{ "scvtfwd  $dst, $src \t// i2d" %}
13116 
13117   ins_encode %{
13118     __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13119   %}
13120 
13121   ins_pipe(fp_i2d);
13122 %}
13123 
// Signed long -> double (scvtfd).
13124 instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
13125   match(Set dst (ConvL2D src));
13126 
13127   ins_cost(INSN_COST * 5);
13128   format %{ "scvtfd  $dst, $src \t// l2d" %}
13129 
13130   ins_encode %{
13131     __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
13132   %}
13133 
13134   ins_pipe(fp_l2d);
13135 %}
13136 
13137 // stack <-> reg and reg <-> reg shuffles with no conversion
13138 
// Reinterpret a float spilled on the stack as a raw 32-bit int.
13139 instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
13140 
13141   match(Set dst (MoveF2I src));
13142 
13143   effect(DEF dst, USE src);
13144 
13145   ins_cost(4 * INSN_COST);
13146 
13147   format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}
13148 
13149   ins_encode %{
13150     __ ldrw($dst$$Register, Address(sp, $src$$disp));
13151   %}
13152 
13153   ins_pipe(iload_reg_reg);
13154 
13155 %}
13156 
// Reinterpret an int stack slot as the raw bits of a float.
13157 instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{
13158 
13159   match(Set dst (MoveI2F src));
13160 
13161   effect(DEF dst, USE src);
13162 
13163   ins_cost(4 * INSN_COST);
13164 
13165   format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}
13166 
13167   ins_encode %{
13168     __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
13169   %}
13170 
13171   ins_pipe(pipe_class_memory);
13172 
13173 %}
13174 
// Reinterpret a double stack slot as a raw 64-bit long.
13175 instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
13176 
13177   match(Set dst (MoveD2L src));
13178 
13179   effect(DEF dst, USE src);
13180 
13181   ins_cost(4 * INSN_COST);
13182 
13183   format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}
13184 
13185   ins_encode %{
13186     __ ldr($dst$$Register, Address(sp, $src$$disp));
13187   %}
13188 
13189   ins_pipe(iload_reg_reg);
13190 
13191 %}
13192 
// Reinterpret a long stack slot as the raw bits of a double.
13193 instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{
13194 
13195   match(Set dst (MoveL2D src));
13196 
13197   effect(DEF dst, USE src);
13198 
13199   ins_cost(4 * INSN_COST);
13200 
13201   format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}
13202 
13203   ins_encode %{
13204     __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
13205   %}
13206 
13207   ins_pipe(pipe_class_memory);
13208 
13209 %}
13210 
// Store the raw bits of a float register to an int stack slot.
13211 instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{
13212 
13213   match(Set dst (MoveF2I src));
13214 
13215   effect(DEF dst, USE src);
13216 
13217   ins_cost(INSN_COST);
13218 
13219   format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}
13220 
13221   ins_encode %{
13222     __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
13223   %}
13224 
13225   ins_pipe(pipe_class_memory);
13226 
13227 %}
13228 
// Store a 32-bit int register to a float stack slot (raw bits).
13229 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
13230 
13231   match(Set dst (MoveI2F src));
13232 
13233   effect(DEF dst, USE src);
13234 
13235   ins_cost(INSN_COST);
13236 
13237   format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}
13238 
13239   ins_encode %{
13240     __ strw($src$$Register, Address(sp, $dst$$disp));
13241   %}
13242 
13243   ins_pipe(istore_reg_reg);
13244 
13245 %}
13246 
// Store the raw bits of a double FP register to a long stack slot.
// Fix: the format string had the operands reversed ("strd $dst, $src");
// the encoding stores $src into the $dst stack slot, and every sibling
// shuffle prints "$src, $dst".
13247 instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{
13248 
13249   match(Set dst (MoveD2L src));
13250 
13251   effect(DEF dst, USE src);
13252 
13253   ins_cost(INSN_COST);
13254 
13255   format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}
13256 
13257   ins_encode %{
13258     __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
13259   %}
13260 
13261   ins_pipe(pipe_class_memory);
13262 
13263 %}
13264 
// Store a 64-bit long register to a double stack slot (raw bits).
13265 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
13266 
13267   match(Set dst (MoveL2D src));
13268 
13269   effect(DEF dst, USE src);
13270 
13271   ins_cost(INSN_COST);
13272 
13273   format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}
13274 
13275   ins_encode %{
13276     __ str($src$$Register, Address(sp, $dst$$disp));
13277   %}
13278 
13279   ins_pipe(istore_reg_reg);
13280 
13281 %}
13282 
// Bit-move float reg -> int reg (fmov, no memory traffic).
13283 instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{
13284 
13285   match(Set dst (MoveF2I src));
13286 
13287   effect(DEF dst, USE src);
13288 
13289   ins_cost(INSN_COST);
13290 
13291   format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}
13292 
13293   ins_encode %{
13294     __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
13295   %}
13296 
13297   ins_pipe(fp_f2i);
13298 
13299 %}
13300 
// Bit-move int reg -> float reg (fmov).
13301 instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{
13302 
13303   match(Set dst (MoveI2F src));
13304 
13305   effect(DEF dst, USE src);
13306 
13307   ins_cost(INSN_COST);
13308 
13309   format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}
13310 
13311   ins_encode %{
13312     __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
13313   %}
13314 
13315   ins_pipe(fp_i2f);
13316 
13317 %}
13318 
// Bit-move double reg -> long reg (fmov).
13319 instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
13320 
13321   match(Set dst (MoveD2L src));
13322 
13323   effect(DEF dst, USE src);
13324 
13325   ins_cost(INSN_COST);
13326 
13327   format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}
13328 
13329   ins_encode %{
13330     __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
13331   %}
13332 
13333   ins_pipe(fp_d2l);
13334 
13335 %}
13336 
// Bit-move long reg -> double reg (fmov).
13337 instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{
13338 
13339   match(Set dst (MoveL2D src));
13340 
13341   effect(DEF dst, USE src);
13342 
13343   ins_cost(INSN_COST);
13344 
13345   format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
13346 
13347   ins_encode %{
13348     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
13349   %}
13350 
13351   ins_pipe(fp_l2d);
13352 
13353 %}
13354 
13355 // ============================================================================
13356 // clearing of an array
13357 
// Zero an array: count in R11, base address in R10; both are fixed
// registers consumed (USE_KILL) by the shared clear-array stub encoding.
13358 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13359 %{
13360   match(Set dummy (ClearArray cnt base));
13361   effect(USE_KILL cnt, USE_KILL base);
13362 
13363   ins_cost(4 * INSN_COST);
13364   format %{ "ClearArray $cnt, $base" %}
13365 
13366   ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));
13367 
13368   ins_pipe(pipe_class_memory);
13369 %}
13370 
13371 // ============================================================================
13372 // Overflow Math Instructions
13373 
// Set flags for int add overflow: cmnw computes op1 + op2 and sets V on
// signed overflow; the consumer tests the V flag.
13374 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13375 %{
13376   match(Set cr (OverflowAddI op1 op2));
13377 
13378   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13379   ins_cost(INSN_COST);
13380   ins_encode %{
13381     __ cmnw($op1$$Register, $op2$$Register);
13382   %}
13383 
13384   ins_pipe(icmp_reg_reg);
13385 %}
13386 
// As above, add/sub-encodable immediate form.
13387 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13388 %{
13389   match(Set cr (OverflowAddI op1 op2));
13390 
13391   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13392   ins_cost(INSN_COST);
13393   ins_encode %{
13394     __ cmnw($op1$$Register, $op2$$constant);
13395   %}
13396 
13397   ins_pipe(icmp_reg_imm);
13398 %}
13399 
// Long add overflow check (64-bit cmn).
13400 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13401 %{
13402   match(Set cr (OverflowAddL op1 op2));
13403 
13404   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13405   ins_cost(INSN_COST);
13406   ins_encode %{
13407     __ cmn($op1$$Register, $op2$$Register);
13408   %}
13409 
13410   ins_pipe(icmp_reg_reg);
13411 %}
13412 
// Long add overflow check, immediate form.
13413 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13414 %{
13415   match(Set cr (OverflowAddL op1 op2));
13416 
13417   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13418   ins_cost(INSN_COST);
13419   ins_encode %{
13420     __ cmn($op1$$Register, $op2$$constant);
13421   %}
13422 
13423   ins_pipe(icmp_reg_imm);
13424 %}
13425 
// Int subtract overflow check: cmpw sets V on signed overflow.
13426 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13427 %{
13428   match(Set cr (OverflowSubI op1 op2));
13429 
13430   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13431   ins_cost(INSN_COST);
13432   ins_encode %{
13433     __ cmpw($op1$$Register, $op2$$Register);
13434   %}
13435 
13436   ins_pipe(icmp_reg_reg);
13437 %}
13438 
// Int subtract overflow check, immediate form.
13439 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13440 %{
13441   match(Set cr (OverflowSubI op1 op2));
13442 
13443   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13444   ins_cost(INSN_COST);
13445   ins_encode %{
13446     __ cmpw($op1$$Register, $op2$$constant);
13447   %}
13448 
13449   ins_pipe(icmp_reg_imm);
13450 %}
13451 
// Long subtract overflow check (64-bit cmp).
13452 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13453 %{
13454   match(Set cr (OverflowSubL op1 op2));
13455 
13456   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13457   ins_cost(INSN_COST);
13458   ins_encode %{
13459     __ cmp($op1$$Register, $op2$$Register);
13460   %}
13461 
13462   ins_pipe(icmp_reg_reg);
13463 %}
13464 
// Long subtract overflow check, immediate form.
13465 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13466 %{
13467   match(Set cr (OverflowSubL op1 op2));
13468 
13469   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13470   ins_cost(INSN_COST);
13471   ins_encode %{
13472     __ cmp($op1$$Register, $op2$$constant);
13473   %}
13474 
13475   ins_pipe(icmp_reg_imm);
13476 %}
13477 
// Int negation overflow check: 0 - op1 via cmpw against zr.
13478 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13479 %{
13480   match(Set cr (OverflowSubI zero op1));
13481 
13482   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13483   ins_cost(INSN_COST);
13484   ins_encode %{
13485     __ cmpw(zr, $op1$$Register);
13486   %}
13487 
13488   ins_pipe(icmp_reg_imm);
13489 %}
13490 
// Long negation overflow check: 0 - op1 via cmp against zr.
// NOTE(review): 'zero' is typed immI0 although OverflowSubL's constant
// operand is a long zero — confirm this pattern can actually match
// (cf. overflowNegI_reg, which correctly uses immI0 for the int case).
13491 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13492 %{
13493   match(Set cr (OverflowSubL zero op1));
13494 
13495   format %{ "cmp   zr, $op1\t# overflow check long" %}
13496   ins_cost(INSN_COST);
13497   ins_encode %{
13498     __ cmp(zr, $op1$$Register);
13499   %}
13500 
13501   ins_pipe(icmp_reg_imm);
13502 %}
13503 
13504 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13505 %{
13506   match(Set cr (OverflowMulI op1 op2));
13507 
13508   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13509             "cmp   rscratch1, rscratch1, sxtw\n\t"
13510             "movw  rscratch1, #0x80000000\n\t"
13511             "cselw rscratch1, rscratch1, zr, NE\n\t"
13512             "cmpw  rscratch1, #1" %}
13513   ins_cost(5 * INSN_COST);
13514   ins_encode %{
13515     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13516     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13517     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13518     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13519     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13520   %}
13521 
13522   ins_pipe(pipe_slow);
13523 %}
13524 
13525 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
13526 %{
13527   match(If cmp (OverflowMulI op1 op2));
13528   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13529             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13530   effect(USE labl, KILL cr);
13531 
13532   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13533             "cmp   rscratch1, rscratch1, sxtw\n\t"
13534             "b$cmp   $labl" %}
13535   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
13536   ins_encode %{
13537     Label* L = $labl$$label;
13538     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13539     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13540     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13541     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13542   %}
13543 
13544   ins_pipe(pipe_serial);
13545 %}
13546 
// 64-bit multiply overflow check producing flags for an arbitrary cmpOp.
// A 64x64->128 multiply does not overflow iff the high 64 bits (smulh)
// equal the sign extension of the low 64 bits (mul), i.e. the low word
// arithmetically shifted right by 63 — NOT 31, which would only test a
// 32-bit result.  The EQ/NE outcome is then converted into V-flag form
// (0x80000000 - 1 sets VS) because OverflowMulL users test overflow.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
13569 
// Fused 64-bit multiply-overflow check and branch.  As in
// overflowMulL_reg, the high half (smulh) must equal the low half
// arithmetically shifted right by 63 bits for the full 128-bit product
// to be a valid 64-bit result; comparing against ASR #31 would test the
// wrong width.  Restricted by predicate to plain overflow/no_overflow
// tests so NE/EQ can stand in for VS/VC directly.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13593 
13594 // ============================================================================
13595 // Compare Instructions
13596 
// Signed 32-bit compare, register-register; sets flags for a cmpOp user.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13610 
// Signed 32-bit compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
13624 
// Signed 32-bit compare against an immediate encodable in an add/sub
// instruction (single-instruction form).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13638 
// Signed 32-bit compare against an arbitrary immediate; costs more
// because the constant may need a separate materializing move.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13652 
13653 // Unsigned compare Instructions; really, same as signed compare
13654 // except it should only be used to feed an If or a CMovI which takes a
13655 // cmpOpU.
13656 
// Unsigned 32-bit compare, register-register.  Emits the same cmpw as
// the signed form; only the flags-register class (rFlagsRegU) differs,
// steering consumers to cmpOpU conditions.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13670 
// Unsigned 32-bit compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
13684 
// Unsigned 32-bit compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13698 
// Unsigned 32-bit compare against an arbitrary immediate (may need a
// constant-materializing move, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13712 
// Signed 64-bit compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13726 
// Signed 64-bit compare against zero.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
13740 
// Signed 64-bit compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13754 
// Signed 64-bit compare against an arbitrary immediate.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13768 
// Pointer compare; pointers compare unsigned, hence rFlagsRegU.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13782 
// Compressed-oop (narrow pointer) compare, unsigned flags.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13796 
// Pointer null test (compare against the immediate 0).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
13810 
// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13824 
13825 // FP comparisons
13826 //
13827 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13828 // using normal cmpOp. See declaration of rFlagsReg for details.
13829 
// Single-precision FP compare; fcmps sets the integer flags directly.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13843 
// Single-precision FP compare against literal 0.0 (fcmps zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double-precision comparisons
13858 
// Double-precision FP compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13872 
// Double-precision FP compare against literal 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13886 
// Three-way float compare producing -1/0/+1 in an integer register
// (the CmpF3 ideal node).  The csinv/csneg pair maps the fcmps flags:
//   EQ -> 0;  LT or unordered -> -1;  otherwise -> +1.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is never branched to; the bind is a no-op
    // left over from an earlier branching version of this sequence.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}
13914 
// Three-way double compare producing -1/0/+1 (CmpD3); same flag
// mapping as compF3_reg_reg but using fcmpd.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is never branched to; the bind is a no-op.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
13941 
// Three-way float compare against 0.0 producing -1/0/+1 (CmpF3).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is never branched to; the bind is a no-op.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}
13968 
// Three-way double compare against 0.0 producing -1/0/+1 (CmpD3).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // NOTE(review): 'done' is never branched to; the bind is a no-op.
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
13994 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw materializes 0/1 from LT,
// then subtraction from zero turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14015 
// CmpLTMask against zero: an arithmetic shift right by 31 replicates
// the sign bit, yielding -1 when src < 0 and 0 otherwise — one insn,
// no flags needed (cr is still listed KILL to match the ideal node).
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14031 
14032 // ============================================================================
14033 // Max and Min
14034 
// MinI, general register-register form: cmpw then cselw picks src1
// when src1 < src2 (signed), else src2.
// (Format string fixed to include the comma before the condition,
// matching the sibling immediate variants; emitted code unchanged.)
instruct minI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2, lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}

14059 
// MinI special case min(src1, 0): select src1 when src1 < 0, else zr.
instruct minI_reg_imm0(iRegINoSp dst, iRegI src1, immI0 src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, zr\t signed int\n\t"
    "cselw $dst, $src1, zr, lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            zr);
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             zr,
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14084 
// MinI special case min(src1, 1): csincw yields src1 when src1 <= 0,
// else zr + 1 == 1, so no register holding the constant 1 is needed.
instruct minI_reg_imm1(iRegINoSp dst, iRegI src1, immI_1 src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, zr\t signed int\n\t"
    "csincw $dst, $src1, zr, le\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            zr);
    __ csincw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LE);
  %}

  ins_pipe(ialu_reg_reg);
%}
14109 
// MinI special case min(src1, -1): csinvw yields src1 when src1 < 0
// (covers src1 == -1 too), else ~zr == -1.
instruct minI_reg_immM1(iRegINoSp dst, iRegI src1, immI_M1 src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, zr\t signed int\n\t"
    "csinvw $dst, $src1, zr, lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            zr);
    __ csinvw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14134 
// MaxI, general register-register form: cmpw then cselw picks src1
// when src1 > src2 (signed), else src2.
// (Format string fixed to include the comma before the condition,
// matching the sibling immediate variants; emitted code unchanged.)
instruct maxI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2, gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14159 
// MaxI special case max(src1, 0): select src1 when src1 > 0, else zr.
instruct maxI_reg_imm0(iRegINoSp dst, iRegI src1, immI0 src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, zr\t signed int\n\t"
    "cselw $dst, $src1, zr, gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            zr);
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             zr,
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14184 
// MaxI special case max(src1, 1): csincw yields src1 when src1 > 0
// (covers src1 == 1), else zr + 1 == 1.
instruct maxI_reg_imm1(iRegINoSp dst, iRegI src1, immI_1 src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, zr\t signed int\n\t"
    "csincw $dst, $src1, zr, gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            zr);
    __ csincw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14209 
// MaxI special case max(src1, -1): csinvw yields src1 when src1 >= 0,
// else ~zr == -1.
instruct maxI_reg_immM1(iRegINoSp dst, iRegI src1, immI_M1 src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, zr\t signed int\n\t"
    "csinvw $dst, $src1, zr, ge\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            zr);
    __ csinvw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::GE);
  %}

  ins_pipe(ialu_reg_reg);
%}
14234 
14235 // ============================================================================
14236 // Branch Instructions
14237 
14238 // Direct Branch.
// Direct Branch.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
14252 
14253 // Conditional Near Branch
// Conditional Near Branch (signed condition codes).
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14273 
14274 // Conditional Near Branch Unsigned
// Conditional Near Branch Unsigned (cmpOpU condition codes).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14294 
14295 // Make use of CBZ and CBNZ.  These instructions, as well as being
14296 // shorter than (cmp; branch), have the additional benefit of not
14297 // killing the flags.
14298 
// 32-bit compare-against-zero fused with branch using cbzw/cbnzw.
// Restricted to eq/ne so the flags never need to be produced.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14317 
// 64-bit compare-against-zero fused with branch using cbz/cbnz.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14336 
// Pointer null-check fused with branch using cbz/cbnz.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14355 
// Compressed-oop null-check fused with branch using cbzw/cbnzw.
instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14374 
// Null check of a decoded narrow oop fused with branch: the narrow oop
// is zero iff the decoded pointer is null, so test it directly with
// cbzw/cbnzw and skip the DecodeN.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14393 
// Unsigned 32-bit compare-against-zero fused with branch.  Unsigned
// 'u > 0' is the same as 'u != 0' and 'u <= 0' the same as 'u == 0',
// so gt/le are admitted alongside eq/ne; the encode maps EQ/LS to
// cbzw and the rest to cbnzw.
instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            ||  n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14414 
// Unsigned 64-bit compare-against-zero fused with branch; same
// eq/ne/gt/le reasoning as cmpUI_imm0_branch, using cbz/cbnz.
instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14435 
14436 // Test bit and Branch
14437 
14438 // Patterns for short (< 32KiB) variants
// Sign test of a long fused with branch: x < 0 iff bit 63 is set, so
// lt/ge against zero become tbnz/tbz on the sign bit (short variant).
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14456 
// Sign test of an int fused with branch via tbnz/tbz on bit 31
// (short variant).
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14474 
// Single-bit test of a long fused with branch: (op1 & (1<<k)) ==/!= 0
// becomes tbz/tbnz on bit k; the power-of-two predicate guarantees the
// mask selects exactly one bit (short variant).
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14493 
// Single-bit test of an int fused with branch (short variant); see
// cmpL_branch_bit for the transform.
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14512 
14513 // And far variants
// Far variant of cmpL_branch_sign: tbr emits an inverted test-bit
// branch around an unconditional branch when the target is out of
// tbz range.
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14530 
// Far variant of cmpI_branch_sign (sign bit 31).
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14547 
// Far variant of cmpL_branch_bit.
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14565 
// Far variant of cmpI_branch_bit.
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14583 
14584 // Test bits
14585 
// Set flags from (op1 & op2) for a long, using tst with an immediate.
// Only applies when the constant is encodable as an AArch64 logical
// immediate; otherwise the register-register rule below is used.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit variant: set flags from (op1 & op2) for an int via tstw.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register form: flags from (op1 & op2) for a long.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register form: flags from (op1 & op2) for an int.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14633 
14634 
14635 // Conditional Far Branch
14636 // Conditional Far Branch Unsigned
14637 // TODO: fixme
14638 
14639 // counted loop end branch near
// Conditional branch closing a counted loop (signed condition codes).
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Conditional branch closing a counted loop (unsigned condition codes).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14672 
14673 // counted loop end branch far
14674 // counted loop end branch far unsigned
14675 // TODO: fixme
14676 
14677 // ============================================================================
14678 // inlined locking and unlocking
14679 
// Inline fast-path monitor enter. Sets flags for the slow-path decision;
// tmp and tmp2 are scratch registers clobbered by the encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inline fast-path monitor exit; same register discipline as above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14707 
14708 
14709 // ============================================================================
14710 // Safepoint Instructions
14711 
14712 // TODO
14713 // provide a near and far version of this code
14714 
// Safepoint poll: a load from the polling page. The load faults when the
// VM arms the page, diverting the thread into the safepoint handler.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    // relocInfo::poll_type tags the instruction so the signal handler
    // can recognize it as a safepoint poll.
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14727 
14728 
14729 // ============================================================================
14730 // Procedure Call/Return Instructions
14731 
14732 // Call Java Static Instruction
14733 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// TO HERE

// Direct call through an inline cache for a dynamically-bound Java method.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Leaf runtime call (no Java frame state needed).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Leaf runtime call that does not touch floating point state.
// NOTE: same encoding as CallLeafDirect on this port.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14819 
14820 // Tail Call; Jump from runtime stub to Java code.
14821 // Also known as an 'interprocedural jump'.
14822 // Target of jump will eventually return to caller.
14823 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code; the target
// eventually returns to this frame's caller. method_oop rides along in
// the inline-cache register for the callee.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used for exception forwarding; the exception oop is pinned
// to r0 and the return address is consumed (see TailJump semantics).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14849 
14850 // Create exception oop: created by stack-crawling runtime code.
14851 // Created exception is now available to this handler, and is setup
14852 // just prior to jumping to this handler. No code emitted.
14853 // TODO check
14854 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Materialize the incoming exception oop for a handler. The oop is
// already in r0 when the handler is entered, so no code is emitted —
// this only tells the register allocator where the value lives.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}


// Method return.
// The epilog node has already popped the frame and loaded the return
// address into lr, so this is just a plain ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14894 
14895 // Die now.
// Halt: emitted on paths the compiler has proven unreachable.
// Executes a brk (breakpoint) trap if control ever gets here.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14910 
14911 // ============================================================================
14912 // Partial Subtype Check
14913 //
14914 // superklass array for an instance of the superklass.  Set a hidden
14915 // internal cache on a hit (cache is checked with exposed code in
14916 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14917 // encoding ALSO sets flags.
14918 
// Full form: produce the result register (zeroed on hit per opcode 0x1)
// in addition to setting flags. Register choices are fixed (r4/r0/r2/r5)
// to match the shared subtype-check stub's calling convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Flags-only form: the check feeds a compare against zero directly, so
// the result register need not be zeroed on a hit (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14948 
// Intrinsic String.compareTo. Only for the non-compact (UTF-16) string
// layout; counts arrive in bytes and are halved to char counts first.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt1$$Register, $cnt1$$Register, 1);
    __ asrw($cnt2$$Register, $cnt2$$Register, 1);
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Intrinsic String.indexOf with a variable-length pattern. The -1
// constant tells the stub the pattern length is in cnt2 (not a constant).
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Intrinsic String.indexOf specialized for a small constant pattern
// length (immI_le_4); the length is baked in, so cnt2 is passed as zr.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Intrinsic String.equals; delegates to the shared arrays_equals stub
// with element size 2 (chars) and is_string=true.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     2, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
15025 
// Intrinsic Arrays.equals for byte arrays (LL encoding, element size 1).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     1, /*is_string*/false);
    %}
  ins_pipe(pipe_class_memory);
%}

// Intrinsic Arrays.equals for char arrays (UU encoding, element size 2).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     2, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
15057 
15058 
15059 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// Uses four SIMD doubleword temporaries; result is the number of
// characters actually encoded (semantics provided by the stub).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15077 
15078 // ============================================================================
15079 // This name is KNOWN by the ADLC and cannot be changed.
15080 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15081 // for this guy.
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
// Zero-size: the current thread is pinned in the dedicated thread
// register (thread_RegP), so no load is needed.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15096 
15097 // ====================VECTOR INSTRUCTIONS=====================================
15098 
15099 // Load vector (32 bits)
// Load vector (32 bits) — ldrs into the low half of a D register.
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits) — ldrd into a D register.
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits) — ldrq into a Q register.
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits) — strs from the low half of a D register.
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits) — strd from a D register.
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits) — strq from a Q register.
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15164 
// ---- Replicate (vector splat) rules ---------------------------------------
// Each rule broadcasts a scalar (register or immediate) into every lane.
// Rules with length 4-or-8 / 2-or-4 predicates also cover the shorter
// sub-vectors that fit in a D register.

// Splat a byte from a GP register into 8 lanes of a D register.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Splat a byte into 16 lanes of a Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Splat a byte immediate (low 8 bits of con) via movi.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Splat a byte immediate into 16 lanes.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Splat a short into 4 halfword lanes of a D register.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Splat a short into 8 halfword lanes of a Q register.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Splat a short immediate (low 16 bits of con) via movi.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Splat a short immediate into 8 lanes.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Splat an int into 2 word lanes of a D register.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

// Splat an int into 4 word lanes of a Q register.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Splat an int immediate into 2 lanes.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

// Splat an int immediate into 4 lanes.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Splat a long into 2 doubleword lanes of a Q register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Zero a 128-bit vector. NOTE(review): matches ReplicateI with an immI0
// operand — presumably the ideal graph canonicalizes a zero long splat
// this way; implemented as a self-eor rather than movi.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}

// Splat a float into 2 lanes of a D register.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

// Splat a float into 4 lanes of a Q register.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

// Splat a double into 2 lanes of a Q register.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15377 
15378 // ====================REDUCTION ARITHMETIC====================================
15379 
15380 instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
15381 %{
15382   match(Set dst (AddReductionVI src1 src2));
15383   ins_cost(INSN_COST);
15384   effect(TEMP tmp, TEMP tmp2);
15385   format %{ "umov  $tmp, $src2, S, 0\n\t"
15386             "umov  $tmp2, $src2, S, 1\n\t"
15387             "addw  $dst, $src1, $tmp\n\t"
15388             "addw  $dst, $dst, $tmp2\t add reduction2i"
15389   %}
15390   ins_encode %{
15391     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
15392     __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
15393     __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
15394     __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
15395   %}
15396   ins_pipe(pipe_class_default);
15397 %}
15398 
15399 instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
15400 %{
15401   match(Set dst (AddReductionVI src1 src2));
15402   ins_cost(INSN_COST);
15403   effect(TEMP tmp, TEMP tmp2);
15404   format %{ "addv  $tmp, T4S, $src2\n\t"
15405             "umov  $tmp2, $tmp, S, 0\n\t"
15406             "addw  $dst, $tmp2, $src1\t add reduction4i"
15407   %}
15408   ins_encode %{
15409     __ addv(as_FloatRegister($tmp$$reg), __ T4S,
15410             as_FloatRegister($src2$$reg));
15411     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
15412     __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
15413   %}
15414   ins_pipe(pipe_class_default);
15415 %}
15416 
15417 instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
15418 %{
15419   match(Set dst (MulReductionVI src1 src2));
15420   ins_cost(INSN_COST);
15421   effect(TEMP tmp, TEMP dst);
15422   format %{ "umov  $tmp, $src2, S, 0\n\t"
15423             "mul   $dst, $tmp, $src1\n\t"
15424             "umov  $tmp, $src2, S, 1\n\t"
15425             "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
15426   %}
15427   ins_encode %{
15428     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
15429     __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
15430     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
15431     __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
15432   %}
15433   ins_pipe(pipe_class_default);
15434 %}
15435 
15436 instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
15437 %{
15438   match(Set dst (MulReductionVI src1 src2));
15439   ins_cost(INSN_COST);
15440   effect(TEMP tmp, TEMP tmp2, TEMP dst);
15441   format %{ "ins   $tmp, $src2, 0, 1\n\t"
15442             "mul   $tmp, $tmp, $src2\n\t"
15443             "umov  $tmp2, $tmp, S, 0\n\t"
15444             "mul   $dst, $tmp2, $src1\n\t"
15445             "umov  $tmp2, $tmp, S, 1\n\t"
15446             "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
15447   %}
15448   ins_encode %{
15449     __ ins(as_FloatRegister($tmp$$reg), __ D,
15450            as_FloatRegister($src2$$reg), 0, 1);
15451     __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
15452            as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
15453     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
15454     __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
15455     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
15456     __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
15457   %}
15458   ins_pipe(pipe_class_default);
15459 %}
15460 
15461 instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
15462 %{
15463   match(Set dst (AddReductionVF src1 src2));
15464   ins_cost(INSN_COST);
15465   effect(TEMP tmp, TEMP dst);
15466   format %{ "fadds $dst, $src1, $src2\n\t"
15467             "ins   $tmp, S, $src2, 0, 1\n\t"
15468             "fadds $dst, $dst, $tmp\t add reduction2f"
15469   %}
15470   ins_encode %{
15471     __ fadds(as_FloatRegister($dst$$reg),
15472              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15473     __ ins(as_FloatRegister($tmp$$reg), __ S,
15474            as_FloatRegister($src2$$reg), 0, 1);
15475     __ fadds(as_FloatRegister($dst$$reg),
15476              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15477   %}
15478   ins_pipe(pipe_class_default);
15479 %}
15480 
15481 instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
15482 %{
        // Float add reduction over a 4-lane (128-bit) vector:
        // dst = src1 + src2[0] + src2[1] + src2[2] + src2[3].
        // Each lane 1..3 is moved into tmp[0] with 'ins' and accumulated
        // with a scalar fadds.
15483   match(Set dst (AddReductionVF src1 src2));
15484   ins_cost(INSN_COST);
15485   effect(TEMP tmp, TEMP dst);
15486   format %{ "fadds $dst, $src1, $src2\n\t"
15487             "ins   $tmp, S, $src2, 0, 1\n\t"
15488             "fadds $dst, $dst, $tmp\n\t"
15489             "ins   $tmp, S, $src2, 0, 2\n\t"
15490             "fadds $dst, $dst, $tmp\n\t"
15491             "ins   $tmp, S, $src2, 0, 3\n\t"
15492             "fadds $dst, $dst, $tmp\t add reduction4f"
15493   %}
15494   ins_encode %{
15495     __ fadds(as_FloatRegister($dst$$reg),
15496              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15497     __ ins(as_FloatRegister($tmp$$reg), __ S,
15498            as_FloatRegister($src2$$reg), 0, 1);
15499     __ fadds(as_FloatRegister($dst$$reg),
15500              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15501     __ ins(as_FloatRegister($tmp$$reg), __ S,
15502            as_FloatRegister($src2$$reg), 0, 2);
15503     __ fadds(as_FloatRegister($dst$$reg),
15504              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15505     __ ins(as_FloatRegister($tmp$$reg), __ S,
15506            as_FloatRegister($src2$$reg), 0, 3);
15507     __ fadds(as_FloatRegister($dst$$reg),
15508              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15509   %}
15510   ins_pipe(pipe_class_default);
15511 %}
15512 
15513 instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
15514 %{
        // Float multiply reduction over a 2-lane (64-bit) vector:
        // dst = src1 * src2[0] * src2[1].
15515   match(Set dst (MulReductionVF src1 src2));
15516   ins_cost(INSN_COST);
15517   effect(TEMP tmp, TEMP dst);
        // Fixed debug-format tail: was "add reduction4f", but this is a
        // 2-lane MUL reduction.
15518   format %{ "fmuls $dst, $src1, $src2\n\t"
15519             "ins   $tmp, S, $src2, 0, 1\n\t"
15520             "fmuls $dst, $dst, $tmp\t mul reduction2f"
15521   %}
15522   ins_encode %{
        // dst = src1 * src2[0]
15523     __ fmuls(as_FloatRegister($dst$$reg),
15524              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
        // tmp[0] = src2[1]
15525     __ ins(as_FloatRegister($tmp$$reg), __ S,
15526            as_FloatRegister($src2$$reg), 0, 1);
        // dst = dst * src2[1]
15527     __ fmuls(as_FloatRegister($dst$$reg),
15528              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15529   %}
15530   ins_pipe(pipe_class_default);
15531 %}
15532 
15533 instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
15534 %{
        // Float multiply reduction over a 4-lane (128-bit) vector:
        // dst = src1 * src2[0] * src2[1] * src2[2] * src2[3].
15535   match(Set dst (MulReductionVF src1 src2));
15536   ins_cost(INSN_COST);
15537   effect(TEMP tmp, TEMP dst);
        // Fixed debug-format tail: was "add reduction4f", but this is a
        // MUL reduction.
15538   format %{ "fmuls $dst, $src1, $src2\n\t"
15539             "ins   $tmp, S, $src2, 0, 1\n\t"
15540             "fmuls $dst, $dst, $tmp\n\t"
15541             "ins   $tmp, S, $src2, 0, 2\n\t"
15542             "fmuls $dst, $dst, $tmp\n\t"
15543             "ins   $tmp, S, $src2, 0, 3\n\t"
15544             "fmuls $dst, $dst, $tmp\t mul reduction4f"
15545   %}
15546   ins_encode %{
        // dst = src1 * src2[0], then fold in lanes 1..3 via tmp[0]
15547     __ fmuls(as_FloatRegister($dst$$reg),
15548              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
15549     __ ins(as_FloatRegister($tmp$$reg), __ S,
15550            as_FloatRegister($src2$$reg), 0, 1);
15551     __ fmuls(as_FloatRegister($dst$$reg),
15552              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15553     __ ins(as_FloatRegister($tmp$$reg), __ S,
15554            as_FloatRegister($src2$$reg), 0, 2);
15555     __ fmuls(as_FloatRegister($dst$$reg),
15556              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15557     __ ins(as_FloatRegister($tmp$$reg), __ S,
15558            as_FloatRegister($src2$$reg), 0, 3);
15559     __ fmuls(as_FloatRegister($dst$$reg),
15560              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15561   %}
15562   ins_pipe(pipe_class_default);
15563 %}
15564 
15565 instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
15566 %{
        // Double add reduction over a 2-lane (128-bit) vector:
        // dst = src1 + src2[0] + src2[1].
15567   match(Set dst (AddReductionVD src1 src2));
15568   ins_cost(INSN_COST);
15569   effect(TEMP tmp, TEMP dst);
15570   format %{ "faddd $dst, $src1, $src2\n\t"
15571             "ins   $tmp, D, $src2, 0, 1\n\t"
15572             "faddd $dst, $dst, $tmp\t add reduction2d"
15573   %}
15574   ins_encode %{
        // dst = src1 + src2[0]
15575     __ faddd(as_FloatRegister($dst$$reg),
15576              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
        // tmp[0] = src2[1]
15577     __ ins(as_FloatRegister($tmp$$reg), __ D,
15578            as_FloatRegister($src2$$reg), 0, 1);
        // dst = dst + src2[1]
15579     __ faddd(as_FloatRegister($dst$$reg),
15580              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15581   %}
15582   ins_pipe(pipe_class_default);
15583 %}
15584 
15585 instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
15586 %{
        // Double multiply reduction over a 2-lane (128-bit) vector:
        // dst = src1 * src2[0] * src2[1].
15587   match(Set dst (MulReductionVD src1 src2));
15588   ins_cost(INSN_COST);
15589   effect(TEMP tmp, TEMP dst);
        // Fixed debug-format tail: was "add reduction2d", but this is a
        // MUL reduction.
15590   format %{ "fmuld $dst, $src1, $src2\n\t"
15591             "ins   $tmp, D, $src2, 0, 1\n\t"
15592             "fmuld $dst, $dst, $tmp\t mul reduction2d"
15593   %}
15594   ins_encode %{
        // dst = src1 * src2[0]
15595     __ fmuld(as_FloatRegister($dst$$reg),
15596              as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
        // tmp[0] = src2[1]
15597     __ ins(as_FloatRegister($tmp$$reg), __ D,
15598            as_FloatRegister($src2$$reg), 0, 1);
        // dst = dst * src2[1]
15599     __ fmuld(as_FloatRegister($dst$$reg),
15600              as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
15601   %}
15602   ins_pipe(pipe_class_default);
15603 %}
15604 
15605 // ====================VECTOR ARITHMETIC=======================================
15606 
15607 // --------------------------------- ADD --------------------------------------
15608 
15609 instruct vadd8B(vecD dst, vecD src1, vecD src2)
15610 %{
      // Vector add instructions. The length predicate selects between the
      // 64-bit (vecD) and 128-bit (vecX) forms; narrower vectors (4B, 2S)
      // reuse the 64-bit form since the high lanes are don't-cares.
15611   predicate(n->as_Vector()->length() == 4 ||
15612             n->as_Vector()->length() == 8);
15613   match(Set dst (AddVB src1 src2));
15614   ins_cost(INSN_COST);
15615   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
15616   ins_encode %{
15617     __ addv(as_FloatRegister($dst$$reg), __ T8B,
15618             as_FloatRegister($src1$$reg),
15619             as_FloatRegister($src2$$reg));
15620   %}
15621   ins_pipe(vdop64);
15622 %}
15623 
15624 instruct vadd16B(vecX dst, vecX src1, vecX src2)
15625 %{
15626   predicate(n->as_Vector()->length() == 16);
15627   match(Set dst (AddVB src1 src2));
15628   ins_cost(INSN_COST);
15629   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
15630   ins_encode %{
15631     __ addv(as_FloatRegister($dst$$reg), __ T16B,
15632             as_FloatRegister($src1$$reg),
15633             as_FloatRegister($src2$$reg));
15634   %}
15635   ins_pipe(vdop128);
15636 %}
15637 
15638 instruct vadd4S(vecD dst, vecD src1, vecD src2)
15639 %{
15640   predicate(n->as_Vector()->length() == 2 ||
15641             n->as_Vector()->length() == 4);
15642   match(Set dst (AddVS src1 src2));
15643   ins_cost(INSN_COST);
15644   format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
15645   ins_encode %{
15646     __ addv(as_FloatRegister($dst$$reg), __ T4H,
15647             as_FloatRegister($src1$$reg),
15648             as_FloatRegister($src2$$reg));
15649   %}
15650   ins_pipe(vdop64);
15651 %}
15652 
15653 instruct vadd8S(vecX dst, vecX src1, vecX src2)
15654 %{
15655   predicate(n->as_Vector()->length() == 8);
15656   match(Set dst (AddVS src1 src2));
15657   ins_cost(INSN_COST);
15658   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
15659   ins_encode %{
15660     __ addv(as_FloatRegister($dst$$reg), __ T8H,
15661             as_FloatRegister($src1$$reg),
15662             as_FloatRegister($src2$$reg));
15663   %}
15664   ins_pipe(vdop128);
15665 %}
15666 
15667 instruct vadd2I(vecD dst, vecD src1, vecD src2)
15668 %{
15669   predicate(n->as_Vector()->length() == 2);
15670   match(Set dst (AddVI src1 src2));
15671   ins_cost(INSN_COST);
15672   format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
15673   ins_encode %{
15674     __ addv(as_FloatRegister($dst$$reg), __ T2S,
15675             as_FloatRegister($src1$$reg),
15676             as_FloatRegister($src2$$reg));
15677   %}
15678   ins_pipe(vdop64);
15679 %}
15680 
15681 instruct vadd4I(vecX dst, vecX src1, vecX src2)
15682 %{
15683   predicate(n->as_Vector()->length() == 4);
15684   match(Set dst (AddVI src1 src2));
15685   ins_cost(INSN_COST);
15686   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
15687   ins_encode %{
15688     __ addv(as_FloatRegister($dst$$reg), __ T4S,
15689             as_FloatRegister($src1$$reg),
15690             as_FloatRegister($src2$$reg));
15691   %}
15692   ins_pipe(vdop128);
15693 %}
15694 
15695 instruct vadd2L(vecX dst, vecX src1, vecX src2)
15696 %{
15697   predicate(n->as_Vector()->length() == 2);
15698   match(Set dst (AddVL src1 src2));
15699   ins_cost(INSN_COST);
15700   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
15701   ins_encode %{
      // 2x long lanes use the T2D (two 64-bit doubleword) arrangement.
15702     __ addv(as_FloatRegister($dst$$reg), __ T2D,
15703             as_FloatRegister($src1$$reg),
15704             as_FloatRegister($src2$$reg));
15705   %}
15706   ins_pipe(vdop128);
15707 %}
15708 
15709 instruct vadd2F(vecD dst, vecD src1, vecD src2)
15710 %{
15711   predicate(n->as_Vector()->length() == 2);
15712   match(Set dst (AddVF src1 src2));
15713   ins_cost(INSN_COST);
15714   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
15715   ins_encode %{
15716     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
15717             as_FloatRegister($src1$$reg),
15718             as_FloatRegister($src2$$reg));
15719   %}
15720   ins_pipe(vdop_fp64);
15721 %}
15722 
15723 instruct vadd4F(vecX dst, vecX src1, vecX src2)
15724 %{
15725   predicate(n->as_Vector()->length() == 4);
15726   match(Set dst (AddVF src1 src2));
15727   ins_cost(INSN_COST);
15728   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
15729   ins_encode %{
15730     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
15731             as_FloatRegister($src1$$reg),
15732             as_FloatRegister($src2$$reg));
15733   %}
15734   ins_pipe(vdop_fp128);
15735 %}
15736 
15737 instruct vadd2D(vecX dst, vecX src1, vecX src2)
15738 %{
        // Double-precision vector add, 2 lanes (128-bit).
        // Fixed: added the length predicate for consistency with vsub2D,
        // vmul2D and vdiv2D (all other 2D instructs guard on length == 2).
        predicate(n->as_Vector()->length() == 2);
15739   match(Set dst (AddVD src1 src2));
15740   ins_cost(INSN_COST);
15741   format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
15742   ins_encode %{
15743     __ fadd(as_FloatRegister($dst$$reg), __ T2D,
15744             as_FloatRegister($src1$$reg),
15745             as_FloatRegister($src2$$reg));
15746   %}
15747   ins_pipe(vdop_fp128);
15748 %}
15749 
15750 // --------------------------------- SUB --------------------------------------
15751 
15752 instruct vsub8B(vecD dst, vecD src1, vecD src2)
15753 %{
      // Vector subtract instructions, mirroring the ADD section above:
      // integer lanes use subv, float/double lanes use fsub.
15754   predicate(n->as_Vector()->length() == 4 ||
15755             n->as_Vector()->length() == 8);
15756   match(Set dst (SubVB src1 src2));
15757   ins_cost(INSN_COST);
15758   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
15759   ins_encode %{
15760     __ subv(as_FloatRegister($dst$$reg), __ T8B,
15761             as_FloatRegister($src1$$reg),
15762             as_FloatRegister($src2$$reg));
15763   %}
15764   ins_pipe(vdop64);
15765 %}
15766 
15767 instruct vsub16B(vecX dst, vecX src1, vecX src2)
15768 %{
15769   predicate(n->as_Vector()->length() == 16);
15770   match(Set dst (SubVB src1 src2));
15771   ins_cost(INSN_COST);
15772   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
15773   ins_encode %{
15774     __ subv(as_FloatRegister($dst$$reg), __ T16B,
15775             as_FloatRegister($src1$$reg),
15776             as_FloatRegister($src2$$reg));
15777   %}
15778   ins_pipe(vdop128);
15779 %}
15780 
15781 instruct vsub4S(vecD dst, vecD src1, vecD src2)
15782 %{
15783   predicate(n->as_Vector()->length() == 2 ||
15784             n->as_Vector()->length() == 4);
15785   match(Set dst (SubVS src1 src2));
15786   ins_cost(INSN_COST);
15787   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
15788   ins_encode %{
15789     __ subv(as_FloatRegister($dst$$reg), __ T4H,
15790             as_FloatRegister($src1$$reg),
15791             as_FloatRegister($src2$$reg));
15792   %}
15793   ins_pipe(vdop64);
15794 %}
15795 
15796 instruct vsub8S(vecX dst, vecX src1, vecX src2)
15797 %{
15798   predicate(n->as_Vector()->length() == 8);
15799   match(Set dst (SubVS src1 src2));
15800   ins_cost(INSN_COST);
15801   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
15802   ins_encode %{
15803     __ subv(as_FloatRegister($dst$$reg), __ T8H,
15804             as_FloatRegister($src1$$reg),
15805             as_FloatRegister($src2$$reg));
15806   %}
15807   ins_pipe(vdop128);
15808 %}
15809 
15810 instruct vsub2I(vecD dst, vecD src1, vecD src2)
15811 %{
15812   predicate(n->as_Vector()->length() == 2);
15813   match(Set dst (SubVI src1 src2));
15814   ins_cost(INSN_COST);
15815   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
15816   ins_encode %{
15817     __ subv(as_FloatRegister($dst$$reg), __ T2S,
15818             as_FloatRegister($src1$$reg),
15819             as_FloatRegister($src2$$reg));
15820   %}
15821   ins_pipe(vdop64);
15822 %}
15823 
15824 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15825 %{
15826   predicate(n->as_Vector()->length() == 4);
15827   match(Set dst (SubVI src1 src2));
15828   ins_cost(INSN_COST);
15829   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
15830   ins_encode %{
15831     __ subv(as_FloatRegister($dst$$reg), __ T4S,
15832             as_FloatRegister($src1$$reg),
15833             as_FloatRegister($src2$$reg));
15834   %}
15835   ins_pipe(vdop128);
15836 %}
15837 
15838 instruct vsub2L(vecX dst, vecX src1, vecX src2)
15839 %{
15840   predicate(n->as_Vector()->length() == 2);
15841   match(Set dst (SubVL src1 src2));
15842   ins_cost(INSN_COST);
15843   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
15844   ins_encode %{
15845     __ subv(as_FloatRegister($dst$$reg), __ T2D,
15846             as_FloatRegister($src1$$reg),
15847             as_FloatRegister($src2$$reg));
15848   %}
15849   ins_pipe(vdop128);
15850 %}
15851 
15852 instruct vsub2F(vecD dst, vecD src1, vecD src2)
15853 %{
15854   predicate(n->as_Vector()->length() == 2);
15855   match(Set dst (SubVF src1 src2));
15856   ins_cost(INSN_COST);
15857   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
15858   ins_encode %{
15859     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
15860             as_FloatRegister($src1$$reg),
15861             as_FloatRegister($src2$$reg));
15862   %}
15863   ins_pipe(vdop_fp64);
15864 %}
15865 
15866 instruct vsub4F(vecX dst, vecX src1, vecX src2)
15867 %{
15868   predicate(n->as_Vector()->length() == 4);
15869   match(Set dst (SubVF src1 src2));
15870   ins_cost(INSN_COST);
15871   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
15872   ins_encode %{
15873     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
15874             as_FloatRegister($src1$$reg),
15875             as_FloatRegister($src2$$reg));
15876   %}
15877   ins_pipe(vdop_fp128);
15878 %}
15879 
15880 instruct vsub2D(vecX dst, vecX src1, vecX src2)
15881 %{
15882   predicate(n->as_Vector()->length() == 2);
15883   match(Set dst (SubVD src1 src2));
15884   ins_cost(INSN_COST);
15885   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
15886   ins_encode %{
15887     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
15888             as_FloatRegister($src1$$reg),
15889             as_FloatRegister($src2$$reg));
15890   %}
15891   ins_pipe(vdop_fp128);
15892 %}
15893 
15894 // --------------------------------- MUL --------------------------------------
15895 
15896 instruct vmul4S(vecD dst, vecD src1, vecD src2)
15897 %{
      // Vector multiply instructions. Note there is no vmul2L: AArch64 SIMD
      // has no 64-bit-lane integer multiply. Integer lanes use mulv,
      // float/double lanes use fmul.
15898   predicate(n->as_Vector()->length() == 2 ||
15899             n->as_Vector()->length() == 4);
15900   match(Set dst (MulVS src1 src2));
15901   ins_cost(INSN_COST);
15902   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
15903   ins_encode %{
15904     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
15905             as_FloatRegister($src1$$reg),
15906             as_FloatRegister($src2$$reg));
15907   %}
15908   ins_pipe(vmul64);
15909 %}
15910 
15911 instruct vmul8S(vecX dst, vecX src1, vecX src2)
15912 %{
15913   predicate(n->as_Vector()->length() == 8);
15914   match(Set dst (MulVS src1 src2));
15915   ins_cost(INSN_COST);
15916   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
15917   ins_encode %{
15918     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
15919             as_FloatRegister($src1$$reg),
15920             as_FloatRegister($src2$$reg));
15921   %}
15922   ins_pipe(vmul128);
15923 %}
15924 
15925 instruct vmul2I(vecD dst, vecD src1, vecD src2)
15926 %{
15927   predicate(n->as_Vector()->length() == 2);
15928   match(Set dst (MulVI src1 src2));
15929   ins_cost(INSN_COST);
15930   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
15931   ins_encode %{
15932     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
15933             as_FloatRegister($src1$$reg),
15934             as_FloatRegister($src2$$reg));
15935   %}
15936   ins_pipe(vmul64);
15937 %}
15938 
15939 instruct vmul4I(vecX dst, vecX src1, vecX src2)
15940 %{
15941   predicate(n->as_Vector()->length() == 4);
15942   match(Set dst (MulVI src1 src2));
15943   ins_cost(INSN_COST);
15944   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
15945   ins_encode %{
15946     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
15947             as_FloatRegister($src1$$reg),
15948             as_FloatRegister($src2$$reg));
15949   %}
15950   ins_pipe(vmul128);
15951 %}
15952 
15953 instruct vmul2F(vecD dst, vecD src1, vecD src2)
15954 %{
15955   predicate(n->as_Vector()->length() == 2);
15956   match(Set dst (MulVF src1 src2));
15957   ins_cost(INSN_COST);
15958   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
15959   ins_encode %{
15960     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
15961             as_FloatRegister($src1$$reg),
15962             as_FloatRegister($src2$$reg));
15963   %}
15964   ins_pipe(vmuldiv_fp64);
15965 %}
15966 
15967 instruct vmul4F(vecX dst, vecX src1, vecX src2)
15968 %{
15969   predicate(n->as_Vector()->length() == 4);
15970   match(Set dst (MulVF src1 src2));
15971   ins_cost(INSN_COST);
15972   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
15973   ins_encode %{
15974     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
15975             as_FloatRegister($src1$$reg),
15976             as_FloatRegister($src2$$reg));
15977   %}
15978   ins_pipe(vmuldiv_fp128);
15979 %}
15980 
15981 instruct vmul2D(vecX dst, vecX src1, vecX src2)
15982 %{
15983   predicate(n->as_Vector()->length() == 2);
15984   match(Set dst (MulVD src1 src2));
15985   ins_cost(INSN_COST);
15986   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
15987   ins_encode %{
15988     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
15989             as_FloatRegister($src1$$reg),
15990             as_FloatRegister($src2$$reg));
15991   %}
15992   ins_pipe(vmuldiv_fp128);
15993 %}
15994 
15995 // --------------------------------- MLA --------------------------------------
15996 
15997 instruct vmla4S(vecD dst, vecD src1, vecD src2)
15998 %{
      // Fused multiply-accumulate: matches dst = dst + src1*src2 and emits a
      // single mlav. dst is both an input (accumulator) and the output.
15999   predicate(n->as_Vector()->length() == 2 ||
16000             n->as_Vector()->length() == 4);
16001   match(Set dst (AddVS dst (MulVS src1 src2)));
16002   ins_cost(INSN_COST);
16003   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
16004   ins_encode %{
16005     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
16006             as_FloatRegister($src1$$reg),
16007             as_FloatRegister($src2$$reg));
16008   %}
16009   ins_pipe(vmla64);
16010 %}
16011 
16012 instruct vmla8S(vecX dst, vecX src1, vecX src2)
16013 %{
16014   predicate(n->as_Vector()->length() == 8);
16015   match(Set dst (AddVS dst (MulVS src1 src2)));
16016   ins_cost(INSN_COST);
16017   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
16018   ins_encode %{
16019     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
16020             as_FloatRegister($src1$$reg),
16021             as_FloatRegister($src2$$reg));
16022   %}
16023   ins_pipe(vmla128);
16024 %}
16025 
16026 instruct vmla2I(vecD dst, vecD src1, vecD src2)
16027 %{
16028   predicate(n->as_Vector()->length() == 2);
16029   match(Set dst (AddVI dst (MulVI src1 src2)));
16030   ins_cost(INSN_COST);
16031   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
16032   ins_encode %{
16033     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
16034             as_FloatRegister($src1$$reg),
16035             as_FloatRegister($src2$$reg));
16036   %}
16037   ins_pipe(vmla64);
16038 %}
16039 
16040 instruct vmla4I(vecX dst, vecX src1, vecX src2)
16041 %{
16042   predicate(n->as_Vector()->length() == 4);
16043   match(Set dst (AddVI dst (MulVI src1 src2)));
16044   ins_cost(INSN_COST);
16045   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
16046   ins_encode %{
16047     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
16048             as_FloatRegister($src1$$reg),
16049             as_FloatRegister($src2$$reg));
16050   %}
16051   ins_pipe(vmla128);
16052 %}
16053 
16054 // --------------------------------- MLS --------------------------------------
16055 
16056 instruct vmls4S(vecD dst, vecD src1, vecD src2)
16057 %{
      // Fused multiply-subtract: matches dst = dst - src1*src2 and emits a
      // single mlsv. dst is both an input (accumulator) and the output.
16058   predicate(n->as_Vector()->length() == 2 ||
16059             n->as_Vector()->length() == 4);
16060   match(Set dst (SubVS dst (MulVS src1 src2)));
16061   ins_cost(INSN_COST);
16062   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
16063   ins_encode %{
16064     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
16065             as_FloatRegister($src1$$reg),
16066             as_FloatRegister($src2$$reg));
16067   %}
16068   ins_pipe(vmla64);
16069 %}
16070 
16071 instruct vmls8S(vecX dst, vecX src1, vecX src2)
16072 %{
16073   predicate(n->as_Vector()->length() == 8);
16074   match(Set dst (SubVS dst (MulVS src1 src2)));
16075   ins_cost(INSN_COST);
16076   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
16077   ins_encode %{
16078     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
16079             as_FloatRegister($src1$$reg),
16080             as_FloatRegister($src2$$reg));
16081   %}
16082   ins_pipe(vmla128);
16083 %}
16084 
16085 instruct vmls2I(vecD dst, vecD src1, vecD src2)
16086 %{
16087   predicate(n->as_Vector()->length() == 2);
16088   match(Set dst (SubVI dst (MulVI src1 src2)));
16089   ins_cost(INSN_COST);
16090   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
16091   ins_encode %{
16092     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
16093             as_FloatRegister($src1$$reg),
16094             as_FloatRegister($src2$$reg));
16095   %}
16096   ins_pipe(vmla64);
16097 %}
16098 
16099 instruct vmls4I(vecX dst, vecX src1, vecX src2)
16100 %{
16101   predicate(n->as_Vector()->length() == 4);
16102   match(Set dst (SubVI dst (MulVI src1 src2)));
16103   ins_cost(INSN_COST);
16104   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
16105   ins_encode %{
16106     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
16107             as_FloatRegister($src1$$reg),
16108             as_FloatRegister($src2$$reg));
16109   %}
16110   ins_pipe(vmla128);
16111 %}
16112 
16113 // --------------------------------- DIV --------------------------------------
16114 
16115 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
16116 %{
      // Vector FP divide. Only float/double forms exist: AArch64 SIMD has
      // no integer vector divide.
16117   predicate(n->as_Vector()->length() == 2);
16118   match(Set dst (DivVF src1 src2));
16119   ins_cost(INSN_COST);
16120   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
16121   ins_encode %{
16122     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
16123             as_FloatRegister($src1$$reg),
16124             as_FloatRegister($src2$$reg));
16125   %}
16126   ins_pipe(vmuldiv_fp64);
16127 %}
16128 
16129 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
16130 %{
16131   predicate(n->as_Vector()->length() == 4);
16132   match(Set dst (DivVF src1 src2));
16133   ins_cost(INSN_COST);
16134   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
16135   ins_encode %{
16136     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
16137             as_FloatRegister($src1$$reg),
16138             as_FloatRegister($src2$$reg));
16139   %}
16140   ins_pipe(vmuldiv_fp128);
16141 %}
16142 
16143 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
16144 %{
16145   predicate(n->as_Vector()->length() == 2);
16146   match(Set dst (DivVD src1 src2));
16147   ins_cost(INSN_COST);
16148   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
16149   ins_encode %{
16150     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
16151             as_FloatRegister($src1$$reg),
16152             as_FloatRegister($src2$$reg));
16153   %}
16154   ins_pipe(vmuldiv_fp128);
16155 %}
16156 
16157 // --------------------------------- SQRT -------------------------------------
16158 
16159 instruct vsqrt2D(vecX dst, vecX src)
16160 %{
      // Vector square root, 2 double lanes (128-bit).
16161   predicate(n->as_Vector()->length() == 2);
16162   match(Set dst (SqrtVD src));
16163   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
16164   ins_encode %{
16165     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
16166              as_FloatRegister($src$$reg));
16167   %}
16168   ins_pipe(vsqrt_fp128);
16169 %}
16170 
16171 // --------------------------------- ABS --------------------------------------
16172 
16173 instruct vabs2F(vecD dst, vecD src)
16174 %{
      // Vector FP absolute value (unary). Cost is INSN_COST * 3, matching
      // the other unary FP instructs in this file.
16175   predicate(n->as_Vector()->length() == 2);
16176   match(Set dst (AbsVF src));
16177   ins_cost(INSN_COST * 3);
16178   format %{ "fabs  $dst,$src\t# vector (2S)" %}
16179   ins_encode %{
16180     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
16181             as_FloatRegister($src$$reg));
16182   %}
16183   ins_pipe(vunop_fp64);
16184 %}
16185 
16186 instruct vabs4F(vecX dst, vecX src)
16187 %{
16188   predicate(n->as_Vector()->length() == 4);
16189   match(Set dst (AbsVF src));
16190   ins_cost(INSN_COST * 3);
16191   format %{ "fabs  $dst,$src\t# vector (4S)" %}
16192   ins_encode %{
16193     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
16194             as_FloatRegister($src$$reg));
16195   %}
16196   ins_pipe(vunop_fp128);
16197 %}
16198 
16199 instruct vabs2D(vecX dst, vecX src)
16200 %{
16201   predicate(n->as_Vector()->length() == 2);
16202   match(Set dst (AbsVD src));
16203   ins_cost(INSN_COST * 3);
16204   format %{ "fabs  $dst,$src\t# vector (2D)" %}
16205   ins_encode %{
16206     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
16207             as_FloatRegister($src$$reg));
16208   %}
16209   ins_pipe(vunop_fp128);
16210 %}
16211 
16212 // --------------------------------- NEG --------------------------------------
16213 
16214 instruct vneg2F(vecD dst, vecD src)
16215 %{
      // Vector FP negation (unary), same shape as the ABS section above.
16216   predicate(n->as_Vector()->length() == 2);
16217   match(Set dst (NegVF src));
16218   ins_cost(INSN_COST * 3);
16219   format %{ "fneg  $dst,$src\t# vector (2S)" %}
16220   ins_encode %{
16221     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
16222             as_FloatRegister($src$$reg));
16223   %}
16224   ins_pipe(vunop_fp64);
16225 %}
16226 
16227 instruct vneg4F(vecX dst, vecX src)
16228 %{
16229   predicate(n->as_Vector()->length() == 4);
16230   match(Set dst (NegVF src));
16231   ins_cost(INSN_COST * 3);
16232   format %{ "fneg  $dst,$src\t# vector (4S)" %}
16233   ins_encode %{
16234     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
16235             as_FloatRegister($src$$reg));
16236   %}
16237   ins_pipe(vunop_fp128);
16238 %}
16239 
16240 instruct vneg2D(vecX dst, vecX src)
16241 %{
16242   predicate(n->as_Vector()->length() == 2);
16243   match(Set dst (NegVD src));
16244   ins_cost(INSN_COST * 3);
16245   format %{ "fneg  $dst,$src\t# vector (2D)" %}
16246   ins_encode %{
16247     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
16248             as_FloatRegister($src$$reg));
16249   %}
16250   ins_pipe(vunop_fp128);
16251 %}
16252 
16253 // --------------------------------- AND --------------------------------------
16254 
16255 instruct vand8B(vecD dst, vecD src1, vecD src2)
16256 %{
      // Bitwise vector AND. Logical ops are lane-agnostic, so the predicate
      // is on length_in_bytes rather than lane count, and the T8B/T16B
      // arrangement is used regardless of element type.
16257   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16258             n->as_Vector()->length_in_bytes() == 8);
16259   match(Set dst (AndV src1 src2));
16260   ins_cost(INSN_COST);
16261   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16262   ins_encode %{
16263     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16264             as_FloatRegister($src1$$reg),
16265             as_FloatRegister($src2$$reg));
16266   %}
16267   ins_pipe(vlogical64);
16268 %}
16269 
16270 instruct vand16B(vecX dst, vecX src1, vecX src2)
16271 %{
16272   predicate(n->as_Vector()->length_in_bytes() == 16);
16273   match(Set dst (AndV src1 src2));
16274   ins_cost(INSN_COST);
16275   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
16276   ins_encode %{
16277     __ andr(as_FloatRegister($dst$$reg), __ T16B,
16278             as_FloatRegister($src1$$reg),
16279             as_FloatRegister($src2$$reg));
16280   %}
16281   ins_pipe(vlogical128);
16282 %}
16283 
16284 // --------------------------------- OR ---------------------------------------
16285 
16286 instruct vor8B(vecD dst, vecD src1, vecD src2)
16287 %{
        // Bitwise vector OR, 64-bit form.
        // Fixed debug-format mnemonic: was "and", but the emitted
        // instruction is orr (cf. vor16B below).
16288   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16289             n->as_Vector()->length_in_bytes() == 8);
16290   match(Set dst (OrV src1 src2));
16291   ins_cost(INSN_COST);
16292   format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
16293   ins_encode %{
16294     __ orr(as_FloatRegister($dst$$reg), __ T8B,
16295             as_FloatRegister($src1$$reg),
16296             as_FloatRegister($src2$$reg));
16297   %}
16298   ins_pipe(vlogical64);
16299 %}
16300 
16301 instruct vor16B(vecX dst, vecX src1, vecX src2)
16302 %{
      // Bitwise vector OR, 128-bit form.
16303   predicate(n->as_Vector()->length_in_bytes() == 16);
16304   match(Set dst (OrV src1 src2));
16305   ins_cost(INSN_COST);
16306   format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
16307   ins_encode %{
16308     __ orr(as_FloatRegister($dst$$reg), __ T16B,
16309             as_FloatRegister($src1$$reg),
16310             as_FloatRegister($src2$$reg));
16311   %}
16312   ins_pipe(vlogical128);
16313 %}
16314 
16315 // --------------------------------- XOR --------------------------------------
16316 
16317 instruct vxor8B(vecD dst, vecD src1, vecD src2)
16318 %{
      // Bitwise vector XOR; the AArch64 mnemonic is eor.
16319   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16320             n->as_Vector()->length_in_bytes() == 8);
16321   match(Set dst (XorV src1 src2));
16322   ins_cost(INSN_COST);
16323   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
16324   ins_encode %{
16325     __ eor(as_FloatRegister($dst$$reg), __ T8B,
16326             as_FloatRegister($src1$$reg),
16327             as_FloatRegister($src2$$reg));
16328   %}
16329   ins_pipe(vlogical64);
16330 %}
16331 
16332 instruct vxor16B(vecX dst, vecX src1, vecX src2)
16333 %{
16334   predicate(n->as_Vector()->length_in_bytes() == 16);
16335   match(Set dst (XorV src1 src2));
16336   ins_cost(INSN_COST);
16337   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
16338   ins_encode %{
16339     __ eor(as_FloatRegister($dst$$reg), __ T16B,
16340             as_FloatRegister($src1$$reg),
16341             as_FloatRegister($src2$$reg));
16342   %}
16343   ins_pipe(vlogical128);
16344 %}
16345 
16346 // ------------------------------ Shift ---------------------------------------
16347 
16348 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
      // Broadcast a scalar left-shift count into every byte lane of a
      // vector register for use by the variable-shift instructs below.
16349   match(Set dst (LShiftCntV cnt));
16350   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
16351   ins_encode %{
16352     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16353   %}
16354   ins_pipe(vdup_reg_reg128);
16355 %}
16356 
16357 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
16358 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
      // Broadcast the count, then negate every lane so sshl/ushl performs
      // a right shift.
16359   match(Set dst (RShiftCntV cnt));
16360   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
16361   ins_encode %{
16362     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16363     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
16364   %}
16365   ins_pipe(vdup_reg_reg128);
16366 %}
16367 
16368 instruct vsll8B(vecD dst, vecD src, vecX shift) %{
16369   predicate(n->as_Vector()->length() == 4 ||
16370             n->as_Vector()->length() == 8);
16371   match(Set dst (LShiftVB src shift));
16372   match(Set dst (RShiftVB src shift));
16373   ins_cost(INSN_COST);
16374   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
16375   ins_encode %{
16376     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
16377             as_FloatRegister($src$$reg),
16378             as_FloatRegister($shift$$reg));
16379   %}
16380   ins_pipe(vshift64);
16381 %}
16382 
// Variable shift of 16 byte lanes (128-bit vector).  A negative per-lane
// count (from vshiftcntR) makes sshl an arithmetic right shift, so this rule
// matches both LShiftVB and RShiftVB.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16396 
// Variable logical right shift of 4 or 8 byte lanes: ushl with the negated
// count (from vshiftcntR) performs an unsigned (logical) right shift.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16410 
// Variable logical right shift of 16 byte lanes (see vsrl8B).
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16423 
// Immediate left shift of 4 or 8 byte lanes.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    // Count is masked to 5 bits (Java int-shift masking).  Shifting a byte
    // lane left by 8 or more produces zero, and shl cannot encode counts
    // >= 8, so zero the destination with eor(src, src) in that case.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16443 
// Immediate left shift of 16 byte lanes (128-bit form of vsll8B_imm).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    // Counts >= 8 zero every byte lane; shl cannot encode them, so use
    // eor(src, src) to produce the zero result.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16462 
// Immediate arithmetic right shift of 4 or 8 byte lanes.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    // Counts >= 8 saturate to 7: every result bit becomes the lane's sign.
    // The amount is then negated-and-masked, matching the assembler's
    // right-shift-as-negative-left-shift encoding (see vshiftcntR above).
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16478 
// Immediate arithmetic right shift of 16 byte lanes (see vsra8B_imm for the
// clamp-to-7 and negate-and-mask rationale).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16493 
// Immediate logical right shift of 4 or 8 byte lanes.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    // Counts >= 8 zero every byte lane (ushr cannot encode them), so emit
    // eor(src, src); otherwise pass the negated-and-masked amount to ushr.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16513 
// Immediate logical right shift of 16 byte lanes (see vsrl8B_imm).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16532 
// Variable shift of 2 or 4 short (16-bit) lanes.  A negative per-lane count
// (from vshiftcntR) makes sshl an arithmetic right shift, so this rule covers
// both LShiftVS and RShiftVS.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16547 
// Variable shift of 8 short lanes (128-bit form of vsll4S).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16561 
// Variable logical right shift of 2 or 4 short lanes: ushl with the negated
// count (from vshiftcntR).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16575 
// Variable logical right shift of 8 short lanes (see vsrl4S).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16588 
// Immediate left shift of 2 or 4 short lanes.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Counts >= 16 zero every 16-bit lane; shl cannot encode them, so zero
    // the destination with eor(src, src) (T8B covers the whole 64-bit reg).
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16608 
// Immediate left shift of 8 short lanes (128-bit form of vsll4S_imm).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    // Counts >= 16 zero every lane; use eor(src, src) for that case.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16627 
// Immediate arithmetic right shift of 2 or 4 short lanes.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Counts >= 16 saturate to 15 (sign fill), then the amount is
    // negated-and-masked per the assembler's right-shift convention.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16643 
// Immediate arithmetic right shift of 8 short lanes (see vsra4S_imm).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16658 
// Immediate logical right shift of 2 or 4 short lanes.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Counts >= 16 zero every lane (ushr cannot encode them); otherwise
    // pass the negated-and-masked amount to ushr.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16678 
// Immediate logical right shift of 8 short lanes (see vsrl4S_imm).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16697 
// Variable shift of 2 int (32-bit) lanes.  A negative per-lane count (from
// vshiftcntR) makes sshl an arithmetic right shift, so this rule covers both
// LShiftVI and RShiftVI.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16711 
// Variable shift of 4 int lanes (128-bit form of vsll2I).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16725 
// Variable logical right shift of 2 int lanes: ushl with negated count.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16738 
// Variable logical right shift of 4 int lanes (see vsrl2I).
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16751 
// Immediate left shift of 2 int lanes.  The & 31 mask equals the 32-bit lane
// width, so no out-of-range special case is needed (unlike the B/H forms).
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16764 
// Immediate left shift of 4 int lanes (see vsll2I_imm).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16777 
// Immediate arithmetic right shift of 2 int lanes; the negate-and-mask
// follows the assembler's right-shift-as-negative-left-shift encoding.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16790 
// Immediate arithmetic right shift of 4 int lanes (see vsra2I_imm).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16803 
// Immediate logical right shift of 2 int lanes (negate-and-mask encoding).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16816 
// Immediate logical right shift of 4 int lanes (see vsrl2I_imm).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16829 
// Variable shift of 2 long (64-bit) lanes.  A negative per-lane count (from
// vshiftcntR) makes sshl an arithmetic right shift, so this rule covers both
// LShiftVL and RShiftVL.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16843 
// Variable logical right shift of 2 long lanes: ushl with negated count.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16856 
// Immediate left shift of 2 long lanes.  The & 63 mask (Java long-shift
// masking) equals the 64-bit lane width, so no special case is needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16869 
// Immediate arithmetic right shift of 2 long lanes (negate-and-mask
// encoding, see the vshiftcntR comment above).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16882 
// Immediate logical right shift of 2 long lanes (negate-and-mask encoding).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16895 
16896 //----------PEEPHOLE RULES-----------------------------------------------------
16897 // These must follow all instruction definitions as they use the names
16898 // defined in the instructions definitions.
16899 //
16900 // peepmatch ( root_instr_name [preceding_instruction]* );
16901 //
16902 // peepconstraint %{
16903 // (instruction_number.operand_name relational_op instruction_number.operand_name
16904 //  [, ...] );
16905 // // instruction numbers are zero-based using left to right order in peepmatch
16906 //
16907 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16908 // // provide an instruction_number.operand_name for each operand that appears
16909 // // in the replacement instruction's match rule
16910 //
16911 // ---------VM FLAGS---------------------------------------------------------
16912 //
16913 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16914 //
16915 // Each peephole rule is given an identifying number starting with zero and
16916 // increasing by one in the order seen by the parser.  An individual peephole
16917 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16918 // on the command-line.
16919 //
16920 // ---------CURRENT LIMITATIONS----------------------------------------------
16921 //
16922 // Only match adjacent instructions in same basic block
16923 // Only equality constraints
16924 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16925 // Only one replacement instruction
16926 //
16927 // ---------EXAMPLE----------------------------------------------------------
16928 //
16929 // // pertinent parts of existing instructions in architecture description
16930 // instruct movI(iRegINoSp dst, iRegI src)
16931 // %{
16932 //   match(Set dst (CopyI src));
16933 // %}
16934 //
16935 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16936 // %{
16937 //   match(Set dst (AddI dst src));
16938 //   effect(KILL cr);
16939 // %}
16940 //
16941 // // Change (inc mov) to lea
16942 // peephole %{
//   // increment preceded by register-register move
16944 //   peepmatch ( incI_iReg movI );
16945 //   // require that the destination register of the increment
16946 //   // match the destination register of the move
16947 //   peepconstraint ( 0.dst == 1.dst );
16948 //   // construct a replacement instruction that sets
16949 //   // the destination to ( move's source register + one )
16950 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16951 // %}
16952 //
16953 
16954 // Implementation no longer uses movX instructions since
16955 // machine-independent system no longer uses CopyX nodes.
16956 //
16957 // peephole
16958 // %{
16959 //   peepmatch (incI_iReg movI);
16960 //   peepconstraint (0.dst == 1.dst);
16961 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16962 // %}
16963 
16964 // peephole
16965 // %{
16966 //   peepmatch (decI_iReg movI);
16967 //   peepconstraint (0.dst == 1.dst);
16968 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16969 // %}
16970 
16971 // peephole
16972 // %{
16973 //   peepmatch (addI_iReg_imm movI);
16974 //   peepconstraint (0.dst == 1.dst);
16975 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16976 // %}
16977 
16978 // peephole
16979 // %{
16980 //   peepmatch (incL_iReg movL);
16981 //   peepconstraint (0.dst == 1.dst);
16982 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16983 // %}
16984 
16985 // peephole
16986 // %{
16987 //   peepmatch (decL_iReg movL);
16988 //   peepconstraint (0.dst == 1.dst);
16989 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16990 // %}
16991 
16992 // peephole
16993 // %{
16994 //   peepmatch (addL_iReg_imm movL);
16995 //   peepconstraint (0.dst == 1.dst);
16996 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16997 // %}
16998 
16999 // peephole
17000 // %{
17001 //   peepmatch (addP_iReg_imm movP);
17002 //   peepconstraint (0.dst == 1.dst);
17003 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17004 // %}
17005 
17006 // // Change load of spilled value to only a spill
17007 // instruct storeI(memory mem, iRegI src)
17008 // %{
17009 //   match(Set mem (StoreI mem src));
17010 // %}
17011 //
17012 // instruct loadI(iRegINoSp dst, memory mem)
17013 // %{
17014 //   match(Set dst (LoadI mem));
17015 // %}
17016 //
17017 
17018 //----------SMARTSPILL RULES---------------------------------------------------
17019 // These must follow all instruction definitions as they use the names
17020 // defined in the instructions definitions.
17021 
17022 // Local Variables:
17023 // mode: c++
17024 // End:
--- EOF ---