1 //
   2 // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r32 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// as regards Java usage, we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// 64-bit integer registers, each described as a real low half plus a
// virtual _H high half for the register allocator (see note above).
// Columns are (register save type, C-convention save type, ideal
// register type, encoding, VMReg) per the key at the top of this file.
// r8 and r9 are deliberately not defined here so they remain invisible
// to the allocator and usable as scratch registers (see note above).

// r0..r7 (argument registers) and r10..r18 are caller-save for both
// Java and C use.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());

// r19..r26: treated as caller-save (SOC) for Java code, but
// callee-save (SOE) under the C calling convention.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());

// r27..r31: reserved system registers, never allocated (NS in the
// first column); note R31 maps to r31_sp, the stack-pointer view.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save).  Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // SIMD/FP registers v0..v31.  Each is described as four 32-bit
  // slices (Vn, Vn_H, Vn_J, Vn_K) so the allocator can track float,
  // double and 128-bit vector uses of the same physical register.
  // All are SOC in both columns here; see the note above regarding
  // v8..v15 being callee-save under the platform ABI.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  // v8..v15 are callee-save under the platform ABI (see note above).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  // v16..v31 are caller-save scratch registers per the platform spec.
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Pseudo-definition of the condition flags: not a real encodable
// operand register, hence the VMRegImpl::Bad() concrete register.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for the general registers: plain volatiles first,
// then the argument registers (which participate in calling
// sequences), then the callee-saved registers, with the
// non-allocatable system registers last (cf. priority note above).
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation order for the SIMD/FP registers: scratch registers
// (v16..v31) first, then argument registers (v0..v7), then the
// ABI-callee-saved v8..v15 last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
// The condition-flags pseudo-register gets a chunk of its own.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes R31 (SP), which
// will never be used as an integer register.  Includes the otherwise
// special R27-R30; use the no_special_* classes to exclude those.
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton classes: pin an operand to one specific 32-bit register,
// used for instructions/calling sequences with fixed register needs.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including R31/SP).  Each entry
// pairs the real low half with its virtual _H high half.
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers, with the frame pointer
// (r29) excluded.  Selected by the reg_class_dynamic below when
// PreserveFramePointer is set.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580  /* R29, */                     // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
// Choose at runtime between the two classes above: the first (_no_fp)
// is used when PreserveFramePointer is set, the second otherwise.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers, with the frame
// pointer (r29) excluded.  Selected by the reg_class_dynamic below
// when PreserveFramePointer is set.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649  /* R29, R29_H, */              // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
// Choose at runtime between the two classes above: the first (_no_fp)
// is used when PreserveFramePointer is set, the second otherwise.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64-bit classes: each pins an operand to one specific
// register pair (low half plus virtual _H high half), for use by
// instructions and calling sequences with fixed register requirements.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (r12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (r27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (r28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (r29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (r30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (r31)
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers, including the special registers
// r27-r31 (compare no_special_ptr_reg below).
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers: ptr_reg minus the
// reserved registers r27 (heapbase), r28 (thread), r29 (fp),
// r30 (lr) and r31 (sp).
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers: single-precision use needs only the
// low 32-bit slice of each v register.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers: each entry pairs the low 32-bit
// slice with its _H slice to cover 64 bits.
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers: same two-slice coverage as
// double_reg, used for 64-bit (D-sized) vector operands.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// A 128-bit vector needs four allocator slots per register: the base
// slot plus the three virtual halves _H, _J and _K.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
//
// Singleton class used to pin an operand to register v0.
// NOTE(review): only the (V0, V0_H) slot pair is listed here, unlike
// the four-slot entries in vectorx_reg -- confirm this matches how
// operands built on this class are used.
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
//
// Singleton class used to pin an operand to register v1 (slots V1
// and V1_H only).
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
//
// Singleton class used to pin an operand to register v2 (slots V2
// and V2_H only).
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
//
// Singleton class used to pin an operand to register v3 (slots V3
// and V3_H only).
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes
//
// RFLAGS is the sole member, so any operand built on this class is
// pinned to the flags register.
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches cost twice a plain instruction.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are costed the same as branches.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are an order of magnitude more expensive,
  // reflecting the cost of the associated barriers.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
1000 class CallStubImpl {
1001 
1002   //--------------------------------------------------------------
1003   //---<  Used for optimization in Compile::shorten_branches  >---
1004   //--------------------------------------------------------------
1005 
1006  public:
1007   // Size of call trampoline stub.
1008   static uint size_call_trampoline() {
1009     return 0; // no call trampolines on this platform
1010   }
1011 
1012   // number of relocations needed by a call trampoline stub
1013   static uint reloc_call_trampoline() {
1014     return 0; // no call trampolines on this platform
1015   }
1016 };
1017 
class HandlerImpl {

 public:

  // Emit the exception/deopt handler code into cbuf; each returns
  // the offset at which the handler starts (definitions live in the
  // generated ad_<arch>.cpp).
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Worst-case size of the exception handler: a single far branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): the multiplier is 4 instructions -- presumably
    // one adr plus up to 3 instructions for the far branch; confirm
    // against MacroAssembler::far_branch_size().
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers used by the volatile load/store and CAS
  // matching predicates defined in the source block below

  // parent/child membar lookups across the intervening Ctl/Mem
  // projections; each returns NULL when the expected graph shape is
  // not present
  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  // true iff barrier is the MemBarVolatile of a GC card mark sequence
  bool is_card_mark_membar(const MemBarNode *barrier);
  // opcode classification helper -- see definition for the exact set
  // of CAS opcodes recognised
  bool is_CAS(int opcode);

  // walk between the leading and trailing membars of a volatile
  // put/CAS subgraph (NULL when the graph does not match)
  MemBarNode *leading_to_trailing(MemBarNode *leading);
  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1064 %}
1065 
1066 source %{
1067 
1068   // Optimizaton of volatile gets and puts
1069   // -------------------------------------
1070   //
1071   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1072   // use to implement volatile reads and writes. For a volatile read
1073   // we simply need
1074   //
1075   //   ldar<x>
1076   //
1077   // and for a volatile write we need
1078   //
1079   //   stlr<x>
1080   //
1081   // Alternatively, we can implement them by pairing a normal
1082   // load/store with a memory barrier. For a volatile read we need
1083   //
1084   //   ldr<x>
1085   //   dmb ishld
1086   //
1087   // for a volatile write
1088   //
1089   //   dmb ish
1090   //   str<x>
1091   //   dmb ish
1092   //
1093   // We can also use ldaxr and stlxr to implement compare and swap CAS
1094   // sequences. These are normally translated to an instruction
1095   // sequence like the following
1096   //
1097   //   dmb      ish
1098   // retry:
1099   //   ldxr<x>   rval raddr
1100   //   cmp       rval rold
1101   //   b.ne done
1102   //   stlxr<x>  rval, rnew, rold
1103   //   cbnz      rval retry
1104   // done:
1105   //   cset      r0, eq
1106   //   dmb ishld
1107   //
1108   // Note that the exclusive store is already using an stlxr
1109   // instruction. That is required to ensure visibility to other
1110   // threads of the exclusive write (assuming it succeeds) before that
1111   // of any subsequent writes.
1112   //
1113   // The following instruction sequence is an improvement on the above
1114   //
1115   // retry:
1116   //   ldaxr<x>  rval raddr
1117   //   cmp       rval rold
1118   //   b.ne done
1119   //   stlxr<x>  rval, rnew, rold
1120   //   cbnz      rval retry
1121   // done:
1122   //   cset      r0, eq
1123   //
1124   // We don't need the leading dmb ish since the stlxr guarantees
1125   // visibility of prior writes in the case that the swap is
1126   // successful. Crucially we don't have to worry about the case where
1127   // the swap is not successful since no valid program should be
1128   // relying on visibility of prior changes by the attempting thread
1129   // in the case where the CAS fails.
1130   //
1131   // Similarly, we don't need the trailing dmb ishld if we substitute
1132   // an ldaxr instruction since that will provide all the guarantees we
1133   // require regarding observation of changes made by other threads
1134   // before any change to the CAS address observed by the load.
1135   //
1136   // In order to generate the desired instruction sequence we need to
1137   // be able to identify specific 'signature' ideal graph node
1138   // sequences which i) occur as a translation of a volatile reads or
1139   // writes or CAS operations and ii) do not occur through any other
1140   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1142   // sequences to the desired machine code sequences. Selection of the
1143   // alternative rules can be implemented by predicates which identify
1144   // the relevant node sequences.
1145   //
1146   // The ideal graph generator translates a volatile read to the node
1147   // sequence
1148   //
1149   //   LoadX[mo_acquire]
1150   //   MemBarAcquire
1151   //
1152   // As a special case when using the compressed oops optimization we
1153   // may also see this variant
1154   //
1155   //   LoadN[mo_acquire]
1156   //   DecodeN
1157   //   MemBarAcquire
1158   //
1159   // A volatile write is translated to the node sequence
1160   //
1161   //   MemBarRelease
1162   //   StoreX[mo_release] {CardMark}-optional
1163   //   MemBarVolatile
1164   //
1165   // n.b. the above node patterns are generated with a strict
1166   // 'signature' configuration of input and output dependencies (see
1167   // the predicates below for exact details). The card mark may be as
1168   // simple as a few extra nodes or, in a few GC configurations, may
1169   // include more complex control flow between the leading and
1170   // trailing memory barriers. However, whatever the card mark
1171   // configuration these signatures are unique to translated volatile
1172   // reads/stores -- they will not appear as a result of any other
1173   // bytecode translation or inlining nor as a consequence of
1174   // optimizing transforms.
1175   //
1176   // We also want to catch inlined unsafe volatile gets and puts and
1177   // be able to implement them using either ldar<x>/stlr<x> or some
1178   // combination of ldr<x>/stlr<x> and dmb instructions.
1179   //
1180   // Inlined unsafe volatiles puts manifest as a minor variant of the
1181   // normal volatile put node sequence containing an extra cpuorder
1182   // membar
1183   //
1184   //   MemBarRelease
1185   //   MemBarCPUOrder
1186   //   StoreX[mo_release] {CardMark}-optional
1187   //   MemBarVolatile
1188   //
1189   // n.b. as an aside, the cpuorder membar is not itself subject to
1190   // matching and translation by adlc rules.  However, the rule
1191   // predicates need to detect its presence in order to correctly
1192   // select the desired adlc rules.
1193   //
1194   // Inlined unsafe volatile gets manifest as a somewhat different
1195   // node sequence to a normal volatile get
1196   //
1197   //   MemBarCPUOrder
1198   //        ||       \\
1199   //   MemBarAcquire LoadX[mo_acquire]
1200   //        ||
1201   //   MemBarCPUOrder
1202   //
1203   // In this case the acquire membar does not directly depend on the
1204   // load. However, we can be sure that the load is generated from an
1205   // inlined unsafe volatile get if we see it dependent on this unique
1206   // sequence of membar nodes. Similarly, given an acquire membar we
1207   // can know that it was added because of an inlined unsafe volatile
1208   // get if it is fed and feeds a cpuorder membar and if its feed
1209   // membar also feeds an acquiring load.
1210   //
1211   // Finally an inlined (Unsafe) CAS operation is translated to the
1212   // following ideal graph
1213   //
1214   //   MemBarRelease
1215   //   MemBarCPUOrder
1216   //   CompareAndSwapX {CardMark}-optional
1217   //   MemBarCPUOrder
1218   //   MemBarAcquire
1219   //
1220   // So, where we can identify these volatile read and write
1221   // signatures we can choose to plant either of the above two code
1222   // sequences. For a volatile read we can simply plant a normal
1223   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1224   // also choose to inhibit translation of the MemBarAcquire and
1225   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1226   //
1227   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1229   // normal str<x> and then a dmb ish for the MemBarVolatile.
1230   // Alternatively, we can inhibit translation of the MemBarRelease
1231   // and MemBarVolatile and instead plant a simple stlr<x>
1232   // instruction.
1233   //
1234   // when we recognise a CAS signature we can choose to plant a dmb
1235   // ish as a translation for the MemBarRelease, the conventional
1236   // macro-instruction sequence for the CompareAndSwap node (which
1237   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1238   // Alternatively, we can elide generation of the dmb instructions
1239   // and plant the alternative CompareAndSwap macro-instruction
1240   // sequence (which uses ldaxr<x>).
1241   //
1242   // Of course, the above only applies when we see these signature
1243   // configurations. We still want to plant dmb instructions in any
1244   // other cases where we may see a MemBarAcquire, MemBarRelease or
1245   // MemBarVolatile. For example, at the end of a constructor which
1246   // writes final/volatile fields we will see a MemBarRelease
1247   // instruction and this needs a 'dmb ish' lest we risk the
1248   // constructed object being visible without making the
1249   // final/volatile field writes visible.
1250   //
1251   // n.b. the translation rules below which rely on detection of the
1252   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1253   // If we see anything other than the signature configurations we
1254   // always just translate the loads and stores to ldr<x> and str<x>
1255   // and translate acquire, release and volatile membars to the
1256   // relevant dmb instructions.
1257   //
1258 
1259   // graph traversal helpers used for volatile put/get and CAS
1260   // optimization
1261 
1262   // 1) general purpose helpers
1263 
1264   // if node n is linked to a parent MemBarNode by an intervening
1265   // Control and Memory ProjNode return the MemBarNode otherwise return
1266   // NULL.
1267   //
1268   // n may only be a Load or a MemBar.
1269 
1270   MemBarNode *parent_membar(const Node *n)
1271   {
1272     Node *ctl = NULL;
1273     Node *mem = NULL;
1274     Node *membar = NULL;
1275 
1276     if (n->is_Load()) {
1277       ctl = n->lookup(LoadNode::Control);
1278       mem = n->lookup(LoadNode::Memory);
1279     } else if (n->is_MemBar()) {
1280       ctl = n->lookup(TypeFunc::Control);
1281       mem = n->lookup(TypeFunc::Memory);
1282     } else {
1283         return NULL;
1284     }
1285 
1286     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1287       return NULL;
1288     }
1289 
1290     membar = ctl->lookup(0);
1291 
1292     if (!membar || !membar->is_MemBar()) {
1293       return NULL;
1294     }
1295 
1296     if (mem->lookup(0) != membar) {
1297       return NULL;
1298     }
1299 
1300     return membar->as_MemBar();
1301   }
1302 
1303   // if n is linked to a child MemBarNode by intervening Control and
1304   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1305 
1306   MemBarNode *child_membar(const MemBarNode *n)
1307   {
1308     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1309     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1310 
1311     // MemBar needs to have both a Ctl and Mem projection
1312     if (! ctl || ! mem)
1313       return NULL;
1314 
1315     MemBarNode *child = NULL;
1316     Node *x;
1317 
1318     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1319       x = ctl->fast_out(i);
1320       // if we see a membar we keep hold of it. we may also see a new
1321       // arena copy of the original but it will appear later
1322       if (x->is_MemBar()) {
1323           child = x->as_MemBar();
1324           break;
1325       }
1326     }
1327 
1328     if (child == NULL) {
1329       return NULL;
1330     }
1331 
1332     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1333       x = mem->fast_out(i);
1334       // if we see a membar we keep hold of it. we may also see a new
1335       // arena copy of the original but it will appear later
1336       if (x == child) {
1337         return child;
1338       }
1339     }
1340     return NULL;
1341   }
1342 
1343   // helper predicate use to filter candidates for a leading memory
1344   // barrier
1345   //
1346   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1347   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1348 
1349   bool leading_membar(const MemBarNode *barrier)
1350   {
1351     int opcode = barrier->Opcode();
1352     // if this is a release membar we are ok
1353     if (opcode == Op_MemBarRelease) {
1354       return true;
1355     }
1356     // if its a cpuorder membar . . .
1357     if (opcode != Op_MemBarCPUOrder) {
1358       return false;
1359     }
1360     // then the parent has to be a release membar
1361     MemBarNode *parent = parent_membar(barrier);
1362     if (!parent) {
1363       return false;
1364     }
1365     opcode = parent->Opcode();
1366     return opcode == Op_MemBarRelease;
1367   }
1368 
1369   // 2) card mark detection helper
1370 
1371   // helper predicate which can be used to detect a volatile membar
1372   // introduced as part of a conditional card mark sequence either by
1373   // G1 or by CMS when UseCondCardMark is true.
1374   //
1375   // membar can be definitively determined to be part of a card mark
1376   // sequence if and only if all the following hold
1377   //
1378   // i) it is a MemBarVolatile
1379   //
1380   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1381   // true
1382   //
1383   // iii) the node's Mem projection feeds a StoreCM node.
1384 
1385   bool is_card_mark_membar(const MemBarNode *barrier)
1386   {
1387     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1388       return false;
1389     }
1390 
1391     if (barrier->Opcode() != Op_MemBarVolatile) {
1392       return false;
1393     }
1394 
1395     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1396 
1397     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1398       Node *y = mem->fast_out(i);
1399       if (y->Opcode() == Op_StoreCM) {
1400         return true;
1401       }
1402     }
1403 
1404     return false;
1405   }
1406 
1407 
1408   // 3) helper predicates to traverse volatile put or CAS graphs which
1409   // may contain GC barrier subgraphs
1410 
1411   // Preamble
1412   // --------
1413   //
1414   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1416   // leading MemBarRelease and a trailing MemBarVolatile as follows
1417   //
1418   //   MemBarRelease
1419   //  {    ||        } -- optional
1420   //  {MemBarCPUOrder}
1421   //       ||       \\
1422   //       ||     StoreX[mo_release]
1423   //       | \ Bot    / ???
1424   //       | MergeMem
1425   //       | /
1426   //   MemBarVolatile
1427   //
1428   // where
1429   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1430   //  | \ and / indicate further routing of the Ctl and Mem feeds
1431   //
1432   // Note that the memory feed from the CPUOrder membar to the
1433   // MergeMem node is an AliasIdxBot slice while the feed from the
1434   // StoreX is for a slice determined by the type of value being
1435   // written.
1436   //
1437   // the diagram above shows the graph we see for non-object stores.
1438   // for a volatile Object store (StoreN/P) we may see other nodes
1439   // below the leading membar because of the need for a GC pre- or
1440   // post-write barrier.
1441   //
  // with most GC configurations we will see this simple variant which
1443   // includes a post-write barrier card mark.
1444   //
1445   //   MemBarRelease______________________________
1446   //         ||    \\               Ctl \        \\
1447   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1448   //         | \ Bot  / oop                 . . .  /
1449   //         | MergeMem
1450   //         | /
1451   //         ||      /
1452   //   MemBarVolatile
1453   //
1454   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1455   // the object address to an int used to compute the card offset) and
1456   // Ctl+Mem to a StoreB node (which does the actual card mark).
1457   //
1458   // n.b. a StoreCM node is only ever used when CMS (with or without
1459   // CondCardMark) or G1 is configured. This abstract instruction
1460   // differs from a normal card mark write (StoreB) because it implies
1461   // a requirement to order visibility of the card mark (StoreCM)
1462   // after that of the object put (StoreP/N) using a StoreStore memory
1463   // barrier. Note that this is /not/ a requirement to order the
1464   // instructions in the generated code (that is already guaranteed by
1465   // the order of memory dependencies). Rather it is a requirement to
1466   // ensure visibility order which only applies on architectures like
1467   // AArch64 which do not implement TSO. This ordering is required for
1468   // both non-volatile and volatile puts.
1469   //
1470   // That implies that we need to translate a StoreCM using the
1471   // sequence
1472   //
1473   //   dmb ishst
1474   //   stlrb
1475   //
1476   // This dmb cannot be omitted even when the associated StoreX or
1477   // CompareAndSwapX is implemented using stlr. However, as described
1478   // below there are circumstances where a specific GC configuration
1479   // requires a stronger barrier in which case it can be omitted.
1480   // 
1481   // With the Serial or Parallel GC using +CondCardMark the card mark
1482   // is performed conditionally on it currently being unmarked in
1483   // which case the volatile put graph looks slightly different
1484   //
1485   //   MemBarRelease____________________________________________
1486   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1487   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1488   //         | \ Bot / oop                          \            |
1489   //         | MergeMem                            . . .      StoreB
1490   //         | /                                                /
1491   //         ||     /
1492   //   MemBarVolatile
1493   //
1494   // It is worth noting at this stage that all the above
1495   // configurations can be uniquely identified by checking that the
1496   // memory flow includes the following subgraph:
1497   //
1498   //   MemBarRelease
1499   //  {MemBarCPUOrder}
1500   //      |  \      . . .
1501   //      |  StoreX[mo_release]  . . .
1502   //  Bot |   / oop
1503   //     MergeMem
1504   //      |
1505   //   MemBarVolatile
1506   //
1507   // This is referred to as a *normal* volatile store subgraph. It can
1508   // easily be detected starting from any candidate MemBarRelease,
1509   // StoreX[mo_release] or MemBarVolatile node.
1510   //
1511   // A small variation on this normal case occurs for an unsafe CAS
1512   // operation. The basic memory flow subgraph for a non-object CAS is
1513   // as follows
1514   //
1515   //   MemBarRelease
1516   //         ||
1517   //   MemBarCPUOrder
1518   //          |     \\   . . .
1519   //          |     CompareAndSwapX
1520   //          |       |
1521   //      Bot |     SCMemProj
1522   //           \     / Bot
1523   //           MergeMem
1524   //           /
1525   //   MemBarCPUOrder
1526   //         ||
1527   //   MemBarAcquire
1528   //
1529   // The same basic variations on this arrangement (mutatis mutandis)
1530   // occur when a card mark is introduced. i.e. the CPUOrder MemBar
1531   // feeds the extra CastP2X, LoadB etc nodes but the above memory
1532   // flow subgraph is still present.
1533   // 
1534   // This is referred to as a *normal* CAS subgraph. It can easily be
1535   // detected starting from any candidate MemBarRelease,
1536   // StoreX[mo_release] or MemBarAcquire node.
1537   //
1538   // The code below uses two helper predicates, leading_to_trailing
1539   // and trailing_to_leading to identify these normal graphs, one
1540   // validating the layout starting from the top membar and searching
1541   // down and the other validating the layout starting from the lower
1542   // membar and searching up.
1543   //
1544   // There are two special case GC configurations when the simple
1545   // normal graphs above may not be generated: when using G1 (which
1546   // always employs a conditional card mark); and when using CMS with
1547   // conditional card marking (+CondCardMark) configured. These GCs
1548   // are both concurrent rather than stop-the world GCs. So they
1549   // introduce extra Ctl+Mem flow into the graph between the leading
1550   // and trailing membar nodes, in particular enforcing stronger
  // memory serialisation between the object put and the corresponding
1552   // conditional card mark. CMS employs a post-write GC barrier while
1553   // G1 employs both a pre- and post-write GC barrier.
1554   //
1555   // The post-write barrier subgraph for these configurations includes
1556   // a MemBarVolatile node -- referred to as a card mark membar --
1557   // which is needed to order the card write (StoreCM) operation in
1558   // the barrier, the preceding StoreX (or CompareAndSwapX) and Store
1559   // operations performed by GC threads i.e. a card mark membar
1560   // constitutes a StoreLoad barrier hence must be translated to a dmb
1561   // ish (whether or not it sits inside a volatile store sequence).
1562   //
1563   // Of course, the use of the dmb ish for the card mark membar also
  // implies that the StoreCM which follows can omit the dmb ishst
1565   // instruction. The necessary visibility ordering will already be
1566   // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
  // needs to be generated as part of the StoreCM sequence with GC
1568   // configuration +CMS -CondCardMark.
1569   // 
1570   // Of course all these extra barrier nodes may well be absent --
1571   // they are only inserted for object puts. Their potential presence
1572   // significantly complicates the task of identifying whether a
1573   // MemBarRelease, StoreX[mo_release], MemBarVolatile or
1574   // MemBarAcquire forms part of a volatile put or CAS when using
1575   // these GC configurations (see below) and also complicates the
1576   // decision as to how to translate a MemBarVolatile and StoreCM.
1577   //
  // So, this means that a card mark MemBarVolatile occurring in the
  // post-barrier graph needs to be distinguished from a normal
1580   // trailing MemBarVolatile. Resolving this is straightforward: a
1581   // card mark MemBarVolatile always projects a Mem feed to a StoreCM
1582   // node and that is a unique marker
1583   //
1584   //      MemBarVolatile (card mark)
1585   //       C |    \     . . .
1586   //         |   StoreCM   . . .
1587   //       . . .
1588   //
1589   // Returning to the task of translating the object put and the
1590   // leading/trailing membar nodes: what do the node graphs look like
1591   // for these 2 special cases? and how can we determine the status of
1592   // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
1593   // normal and non-normal cases?
1594   //
1595   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1597   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1598   // intervening StoreLoad barrier (MemBarVolatile).
1599   //
1600   // So, with CMS we may see a node graph for a volatile object store
1601   // which looks like this
1602   //
1603   //   MemBarRelease
1604   //   MemBarCPUOrder_(leading)____________________
1605   //     C |  | M \       \\               M |   C \
1606   //       |  |    \    StoreN/P[mo_release] |  CastP2X
1607   //       |  | Bot \    / oop      \        |
1608   //       |  |    MergeMem          \      / 
1609   //       |  |      /                |    /
1610   //     MemBarVolatile (card mark)   |   /
1611   //     C |  ||    M |               |  /
1612   //       | LoadB    | Bot       oop | / Bot
1613   //       |   |      |              / /
1614   //       | Cmp      |\            / /
1615   //       | /        | \          / /
1616   //       If         |  \        / /
1617   //       | \        |   \      / /
1618   // IfFalse  IfTrue  |    \    / /
1619   //       \     / \  |    |   / /
1620   //        \   / StoreCM  |  / /
1621   //         \ /      \   /  / /
1622   //        Region     Phi  / /
1623   //          | \   Raw |  / /
1624   //          |  . . .  | / /
1625   //          |       MergeMem
1626   //          |           |
1627   //        MemBarVolatile (trailing)
1628   //
1629   // Notice that there are two MergeMem nodes below the leading
1630   // membar. The first MergeMem merges the AliasIdxBot Mem slice from
1631   // the leading membar and the oopptr Mem slice from the Store into
1632   // the card mark membar. The trailing MergeMem merges the
1633   // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
1634   // slice from the StoreCM and an oop slice from the StoreN/P node
1635   // into the trailing membar (n.b. the raw slice proceeds via a Phi
1636   // associated with the If region).
1637   //
1638   // So, in the case of CMS + CondCardMark the volatile object store
1639   // graph still includes a normal volatile store subgraph from the
1640   // leading membar to the trailing membar. However, it also contains
1641   // the same shape memory flow to the card mark membar. The two flows
1642   // can be distinguished by testing whether or not the downstream
1643   // membar is a card mark membar.
1644   //
1645   // The graph for a CAS also varies with CMS + CondCardMark, in
1646   // particular employing a control feed from the CompareAndSwapX node
1647   // through a CmpI and If to the card mark membar and StoreCM which
1648   // updates the associated card. This avoids executing the card mark
1649   // if the CAS fails. However, it can be seen from the diagram below
1650   // that the presence of the barrier does not alter the normal CAS
1651   // memory subgraph where the leading membar feeds a CompareAndSwapX,
1652   // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
1653   // MemBarAcquire pair.
1654   //
1655   //   MemBarRelease
1656   //   MemBarCPUOrder__(leading)_______________________
1657   //   C /  M |                        \\            C \
1658   //  . . .   | Bot                CompareAndSwapN/P   CastP2X
1659   //          |                  C /  M |
1660   //          |                 CmpI    |
1661   //          |                  /      |
1662   //          |               . . .     |
1663   //          |              IfTrue     |
1664   //          |              /          |
1665   //       MemBarVolatile (card mark)   |
1666   //        C |  ||    M |              |
1667   //          | LoadB    | Bot   ______/|
1668   //          |   |      |      /       |
1669   //          | Cmp      |     /      SCMemProj
1670   //          | /        |    /         |
1671   //          If         |   /         /
1672   //          | \        |  /         / Bot
1673   //     IfFalse  IfTrue | /         /
1674   //          |   / \   / / prec    /
1675   //   . . .  |  /  StoreCM        /
1676   //        \ | /      | raw      /
1677   //        Region    . . .      /
1678   //           | \              /
1679   //           |   . . .   \    / Bot
1680   //           |        MergeMem
1681   //           |          /
1682   //         MemBarCPUOrder
1683   //         MemBarAcquire (trailing)
1684   //
1685   // This has a slightly different memory subgraph to the one seen
1686   // previously but the core of it has a similar memory flow to the
1687   // CAS normal subgraph:
1688   //
1689   //   MemBarRelease
1690   //   MemBarCPUOrder____
1691   //         |          \      . . .
1692   //         |       CompareAndSwapX  . . .
1693   //         |       C /  M |
1694   //         |      CmpI    |
1695   //         |       /      |
1696   //         |      . .    /
1697   //     Bot |   IfTrue   /
1698   //         |   /       /
1699   //    MemBarVolatile  /
1700   //         | ...     /
1701   //      StoreCM ... /
1702   //         |       / 
1703   //       . . .  SCMemProj
1704   //      Raw \    / Bot
1705   //        MergeMem
1706   //           |
1707   //   MemBarCPUOrder
1708   //   MemBarAcquire
1709   //
1710   // The G1 graph for a volatile object put is a lot more complicated.
1711   // Nodes inserted on behalf of G1 may comprise: a pre-write graph
1712   // which adds the old value to the SATB queue; the releasing store
1713   // itself; and, finally, a post-write graph which performs a card
1714   // mark.
1715   //
1716   // The pre-write graph may be omitted, but only when the put is
1717   // writing to a newly allocated (young gen) object and then only if
1718   // there is a direct memory chain to the Initialize node for the
1719   // object allocation. This will not happen for a volatile put since
1720   // any memory chain passes through the leading membar.
1721   //
1722   // The pre-write graph includes a series of 3 If tests. The outermost
1723   // If tests whether SATB is enabled (no else case). The next If tests
1724   // whether the old value is non-NULL (no else case). The third tests
1725   // whether the SATB queue index is > 0, if so updating the queue. The
1726   // else case for this third If calls out to the runtime to allocate a
1727   // new queue buffer.
1728   //
1729   // So with G1 the pre-write and releasing store subgraph looks like
1730   // this (the nested Ifs are omitted).
1731   //
1732   //  MemBarRelease (leading)____________
1733   //     C |  ||  M \   M \    M \  M \ . . .
1734   //       | LoadB   \  LoadL  LoadN   \
1735   //       | /        \                 \
1736   //       If         |\                 \
1737   //       | \        | \                 \
1738   //  IfFalse  IfTrue |  \                 \
1739   //       |     |    |   \                 |
1740   //       |     If   |   /\                |
1741   //       |     |          \               |
1742   //       |                 \              |
1743   //       |    . . .         \             |
1744   //       | /       | /       |            |
1745   //      Region  Phi[M]       |            |
1746   //       | \       |         |            |
1747   //       |  \_____ | ___     |            |
1748   //     C | C \     |   C \ M |            |
1749   //       | CastP2X | StoreN/P[mo_release] |
1750   //       |         |         |            |
1751   //     C |       M |       M |          M |
1752   //        \        | Raw     | oop       / Bot
1753   //                  . . .
1754   //          (post write subtree elided)
1755   //                    . . .
1756   //             C \         M /
1757   //         MemBarVolatile (trailing)
1758   //
1759   // Note that the three memory feeds into the post-write tree are an
1760   // AliasRawIdx slice associated with the writes in the pre-write
1761   // tree, an oop type slice from the StoreX specific to the type of
1762   // the volatile field and the AliasBotIdx slice emanating from the
1763   // leading membar.
1764   //
1765   // n.b. the LoadB in this subgraph is not the card read -- it's a
1766   // read of the SATB queue active flag.
1767   //
1768   // The CAS graph is once again a variant of the above with a
1769   // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
1770   // value from the CompareAndSwapX node is fed into the post-write
  // graph along with the AliasIdxRaw feed from the pre-barrier and
  // the AliasIdxBot feeds from the leading membar and the SCMemProj.
1773   //
1774   //  MemBarRelease (leading)____________
1775   //     C |  ||  M \   M \    M \  M \ . . .
1776   //       | LoadB   \  LoadL  LoadN   \
1777   //       | /        \                 \
1778   //       If         |\                 \
1779   //       | \        | \                 \
1780   //  IfFalse  IfTrue |  \                 \
1781   //       |     |    |   \                 \
1782   //       |     If   |    \                 |
1783   //       |     |          \                |
1784   //       |                 \               |
1785   //       |    . . .         \              |
1786   //       | /       | /       \             |
1787   //      Region  Phi[M]        \            |
1788   //       | \       |           \           |
1789   //       |  \_____ |            |          |
1790   //     C | C \     |            |          |
1791   //       | CastP2X |     CompareAndSwapX   |
1792   //       |         |   res |     |         |
1793   //     C |       M |       |  SCMemProj  M |
1794   //        \        | Raw   |     | Bot    / Bot
1795   //                  . . .
1796   //          (post write subtree elided)
1797   //                    . . .
1798   //             C \         M /
1799   //         MemBarVolatile (trailing)
1800   //
1801   // The G1 post-write subtree is also optional, this time when the
1802   // new value being written is either null or can be identified as a
1803   // newly allocated (young gen) object with no intervening control
1804   // flow. The latter cannot happen but the former may, in which case
1805   // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged directly into the
1807   // trailing membar as per the normal subgraph. So, the only special
1808   // case which arises is when the post-write subgraph is generated.
1809   //
1810   // The kernel of the post-write G1 subgraph is the card mark itself
1811   // which includes a card mark memory barrier (MemBarVolatile), a
1812   // card test (LoadB), and a conditional update (If feeding a
1813   // StoreCM). These nodes are surrounded by a series of nested Ifs
1814   // which try to avoid doing the card mark. The top level If skips if
1815   // the object reference does not cross regions (i.e. it tests if
1816   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1817   // need not be recorded. The next If, which skips on a NULL value,
1818   // may be absent (it is not generated if the type of value is >=
1819   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1820   // checking if card_val != young).  n.b. although this test requires
1821   // a pre-read of the card it can safely be done before the StoreLoad
1822   // barrier. However that does not bypass the need to reread the card
1823   // after the barrier.
1824   //
1825   //                (pre-write subtree elided)
1826   //        . . .                  . . .    . . .  . . .
1827   //        C |               M |    M |    M |
1828   //       Region            Phi[M] StoreN    |
1829   //          |            Raw  |  oop |  Bot |
1830   //         / \_______         |\     |\     |\
1831   //      C / C \      . . .    | \    | \    | \
1832   //       If   CastP2X . . .   |  \   |  \   |  \
1833   //       / \                  |   \  |   \  |   \
1834   //      /   \                 |    \ |    \ |    \
1835   // IfFalse IfTrue             |      |      |     \
1836   //   |       |                 \     |     /       |
1837   //   |       If                 \    | \  /   \    |
1838   //   |      / \                  \   |   /     \   |
1839   //   |     /   \                  \  |  / \     |  |
1840   //   | IfFalse IfTrue           MergeMem   \    |  |
1841   //   |  . . .    / \                 |      \   |  |
1842   //   |          /   \                |       |  |  |
1843   //   |     IfFalse IfTrue            |       |  |  |
1844   //   |      . . .    |               |       |  |  |
1845   //   |               If             /        |  |  |
1846   //   |               / \           /         |  |  |
1847   //   |              /   \         /          |  |  |
1848   //   |         IfFalse IfTrue    /           |  |  |
1849   //   |           . . .   |      /            |  |  |
1850   //   |                    \    /             |  |  |
1851   //   |                     \  /              |  |  |
1852   //   |         MemBarVolatile__(card mark  ) |  |  |
1853   //   |              ||   C |     \           |  |  |
1854   //   |             LoadB   If     |         /   |  |
1855   //   |                    / \ Raw |        /   /  /
1856   //   |                   . . .    |       /   /  /
1857   //   |                        \   |      /   /  /
1858   //   |                        StoreCM   /   /  /
1859   //   |                           |     /   /  /
1860   //   |                            . . .   /  /
1861   //   |                                   /  /
1862   //   |   . . .                          /  /
1863   //   |    |             | /            /  /
1864   //   |    |           Phi[M] /        /  /
1865   //   |    |             |   /        /  /
1866   //   |    |             |  /        /  /
1867   //   |  Region  . . .  Phi[M]      /  /
1868   //   |    |             |         /  /
1869   //    \   |             |        /  /
1870   //     \  | . . .       |       /  /
1871   //      \ |             |      /  /
1872   //      Region         Phi[M] /  /
1873   //        |               \  /  /
1874   //         \             MergeMem
1875   //          \            /
1876   //          MemBarVolatile
1877   //
1878   // As with CMS + CondCardMark the first MergeMem merges the
1879   // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
1880   // slice from the Store into the card mark membar. However, in this
1881   // case it may also merge an AliasRawIdx mem slice from the pre
1882   // barrier write.
1883   //
1884   // The trailing MergeMem merges an AliasIdxBot Mem slice from the
1885   // leading membar with an oop slice from the StoreN and an
1886   // AliasRawIdx slice from the post barrier writes. In this case the
1887   // AliasIdxRaw Mem slice is merged through a series of Phi nodes
1888   // which combine feeds from the If regions in the post barrier
1889   // subgraph.
1890   //
1891   // So, for G1 the same characteristic subgraph arises as for CMS +
1892   // CondCardMark. There is a normal subgraph feeding the card mark
1893   // membar and a normal subgraph feeding the trailing membar.
1894   //
1895   // The CAS graph when using G1GC also includes an optional
1896   // post-write subgraph. It is very similar to the above graph except
1897   // for a few details.
1898   // 
  // - The control flow is gated by an additional If which tests the
1900   // result from the CompareAndSwapX node
1901   // 
1902   //  - The MergeMem which feeds the card mark membar only merges the
1903   // AliasIdxBot slice from the leading membar and the AliasIdxRaw
1904   // slice from the pre-barrier. It does not merge the SCMemProj
1905   // AliasIdxBot slice. So, this subgraph does not look like the
1906   // normal CAS subgraph.
1907   //
1908   // - The MergeMem which feeds the trailing membar merges the
1909   // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
1910   // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
1911   // has two AliasIdxBot input slices. However, this subgraph does
1912   // still look like the normal CAS subgraph.
1913   //
1914   // So, the upshot is:
1915   //
1916   // In all cases a volatile put graph will include a *normal*
  // volatile store subgraph between the leading membar and the
  // trailing membar. It may also include a normal volatile store
  // subgraph between the leading membar and the card mark membar.
1920   //
1921   // In all cases a CAS graph will contain a unique normal CAS graph
1922   // feeding the trailing membar.
1923   //
1924   // In all cases where there is a card mark membar (either as part of
1925   // a volatile object put or CAS) it will be fed by a MergeMem whose
1926   // AliasIdxBot slice feed will be a leading membar.
1927   //
1928   // The predicates controlling generation of instructions for store
1929   // and barrier nodes employ a few simple helper functions (described
1930   // below) which identify the presence or absence of all these
1931   // subgraph configurations and provide a means of traversing from
1932   // one node in the subgraph to another.
1933 
1934   // is_CAS(int opcode)
1935   //
1936   // return true if opcode is one of the possible CompareAndSwapX
1937   // values otherwise false.
1938 
1939   bool is_CAS(int opcode)
1940   {
1941     return (opcode == Op_CompareAndSwapI ||
1942             opcode == Op_CompareAndSwapL ||
1943             opcode == Op_CompareAndSwapN ||
1944             opcode == Op_CompareAndSwapP);
1945   }
1946 
1947   // leading_to_trailing
1948   //
  // graph traversal helper which detects the normal case Mem feed from
1950   // a release membar (or, optionally, its cpuorder child) to a
1951   // dependent volatile membar i.e. it ensures that one or other of
1952   // the following Mem flow subgraph is present.
1953   //
1954   //   MemBarRelease {leading}
1955   //   {MemBarCPUOrder} {optional}
1956   //     Bot |  \      . . .
1957   //         |  StoreN/P[mo_release]  . . .
1958   //         |   /
1959   //        MergeMem
1960   //         |
1961   //   MemBarVolatile {not card mark}
1962   //
1963   //   MemBarRelease {leading}
1964   //   {MemBarCPUOrder} {optional}
1965   //      |       \      . . .
1966   //      |     CompareAndSwapX  . . .
1967   //               |
1968   //     . . .    SCMemProj
1969   //           \   |
1970   //      |    MergeMem
1971   //      |       /
1972   //    MemBarCPUOrder
1973   //    MemBarAcquire {trailing}
1974   //
1975   // the predicate needs to be capable of distinguishing the following
  // volatile put graph which may arise when a GC post barrier
1977   // inserts a card mark membar
1978   //
1979   //   MemBarRelease {leading}
1980   //   {MemBarCPUOrder}__
1981   //     Bot |   \       \
1982   //         |   StoreN/P \
1983   //         |    / \     |
1984   //        MergeMem \    |
1985   //         |        \   |
1986   //   MemBarVolatile  \  |
1987   //    {card mark}     \ |
1988   //                  MergeMem
1989   //                      |
1990   // {not card mark} MemBarVolatile
1991   //
1992   // if the correct configuration is present returns the trailing
1993   // membar otherwise NULL.
1994   //
1995   // the input membar is expected to be either a cpuorder membar or a
1996   // release membar. in the latter case it should not have a cpu membar
1997   // child.
1998   //
1999   // the returned value may be a card mark or trailing membar
2000   //
2001 
  MemBarNode *leading_to_trailing(MemBarNode *leading)
  {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    if (!mem) {
      return NULL;
    }

    // the leading membar's Mem projection may feed one releasing
    // store or CAS plus at most two MergeMems; record each as it is
    // found, bailing out on any configuration that cannot be a valid
    // volatile put/CAS subgraph
    Node *x = NULL;
    StoreNode * st = NULL;
    LoadStoreNode *cas = NULL;
    MergeMemNode *mm = NULL;
    MergeMemNode *mm2 = NULL;

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        if (mm != NULL) {
          if (mm2 != NULL) {
            // should not see more than 2 merge mems
            return NULL;
          } else {
            mm2 = x->as_MergeMem();
          }
        } else {
          mm = x->as_MergeMem();
        }
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas) {
      return NULL;
    }

    // must have at least one merge if we also have st
    if (st && !mm) {
      return NULL;
    }

    if (cas) {
      // CAS case: the mem flow must run
      // CAS -> SCMemProj -> MergeMem -> MemBarCPUOrder -> MemBarAcquire
      // and the trailing MemBarAcquire is the result
      Node *y = NULL;
      // look for an SCMemProj
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->is_Proj()) {
          y = x;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      // the proj must feed a MergeMem
      for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
        x = y->fast_out(i);
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm == NULL) {
        return NULL;
      }
      MemBarNode *mbar = NULL;
      // ensure the merge feeds a trailing membar cpuorder + acquire pair
      for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
        x = mm->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarCPUOrder) {
            MemBarNode *z =  x->as_MemBar();
            z = child_membar(z);
            if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
              mbar = z;
            }
          }
          break;
        }
      }
      // NULL if we did not find the trailing acquire
      return mbar;
    } else {
      // store case: the store must feed every MergeMem we saw and
      // each MergeMem must feed a volatile membar
      Node *y = NULL;
      // ensure the store feeds the first mergemem;
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          y = st;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      if (mm2 != NULL) {
        // ensure the store feeds the second mergemem;
        y = NULL;
        for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
          if (st->fast_out(i) == mm2) {
            y = st;
          }
        }
        if (y == NULL) {
          return NULL;
        }
      }

      MemBarNode *mbar = NULL;
      // ensure the first mergemem feeds a volatile membar
      for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
        x = mm->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarVolatile) {
            mbar = x->as_MemBar();
          }
          break;
        }
      }
      if (mm2 == NULL) {
        // this is our only option for a trailing membar
        return mbar;
      }
      // ensure the second mergemem feeds a volatile membar
      MemBarNode *mbar2 = NULL;
      for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
        x = mm2->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarVolatile) {
            mbar2 = x->as_MemBar();
          }
          break;
        }
      }
      // if we have two merge mems we must have two volatile membars
      if (mbar == NULL || mbar2 == NULL) {
        return NULL;
      }
      // with two membars exactly one should be a card mark membar;
      // return the other (trailing) membar
      if (is_card_mark_membar(mbar2)) {
        return mbar;
      } else {
        if (is_card_mark_membar(mbar)) {
          return mbar2;
        } else {
          return NULL;
        }
      }
    }
  }
2168 
2169   // trailing_to_leading
2170   //
2171   // graph traversal helper which detects the normal case Mem feed
2172   // from a trailing membar to a preceding release membar (optionally
2173   // its cpuorder child) i.e. it ensures that one or other of the
2174   // following Mem flow subgraphs is present.
2175   //
2176   //   MemBarRelease {leading}
2177   //   MemBarCPUOrder {optional}
2178   //    | Bot |  \      . . .
2179   //    |     |  StoreN/P[mo_release]  . . .
2180   //    |     |   /
2181   //    |    MergeMem
2182   //    |     |
2183   //   MemBarVolatile {not card mark}
2184   //
2185   //   MemBarRelease {leading}
2186   //   MemBarCPUOrder {optional}
2187   //      |       \      . . .
2188   //      |     CompareAndSwapX  . . .
2189   //               |
2190   //     . . .    SCMemProj
2191   //           \   |
2192   //      |    MergeMem
2193   //      |       |
2194   //    MemBarCPUOrder
2195   //    MemBarAcquire {trailing}
2196   //
2197   // this predicate checks for the same flow as the previous predicate
2198   // but starting from the bottom rather than the top.
2199   //
2200   // if the configuration is present returns the cpuorder member for
2201   // preference or when absent the release membar otherwise NULL.
2202   //
2203   // n.b. the input membar is expected to be a MemBarVolatile or
2204   // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
2205   // mark membar.
2206 
2207   MemBarNode *trailing_to_leading(const MemBarNode *barrier)
2208   {
2209     // input must be a volatile membar
2210     assert((barrier->Opcode() == Op_MemBarVolatile ||
2211             barrier->Opcode() == Op_MemBarAcquire),
2212            "expecting a volatile or an acquire membar");
2213 
2214     assert((barrier->Opcode() != Op_MemBarVolatile) ||
2215            !is_card_mark_membar(barrier),
2216            "not expecting a card mark membar");
2217     Node *x;
2218     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2219 
2220     // if we have an acquire membar then it must be fed via a CPUOrder
2221     // membar
2222 
2223     if (is_cas) {
2224       // skip to parent barrier which must be a cpuorder
2225       x = parent_membar(barrier);
2226       if (x->Opcode() != Op_MemBarCPUOrder)
2227         return NULL;
2228     } else {
2229       // start from the supplied barrier
2230       x = (Node *)barrier;
2231     }
2232 
2233     // the Mem feed to the membar should be a merge
2234     x = x ->in(TypeFunc::Memory);
2235     if (!x->is_MergeMem())
2236       return NULL;
2237 
2238     MergeMemNode *mm = x->as_MergeMem();
2239 
2240     if (is_cas) {
2241       // the merge should be fed from the CAS via an SCMemProj node
2242       x = NULL;
2243       for (uint idx = 1; idx < mm->req(); idx++) {
2244         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2245           x = mm->in(idx);
2246           break;
2247         }
2248       }
2249       if (x == NULL) {
2250         return NULL;
2251       }
2252       // check for a CAS feeding this proj
2253       x = x->in(0);
2254       int opcode = x->Opcode();
2255       if (!is_CAS(opcode)) {
2256         return NULL;
2257       }
2258       // the CAS should get its mem feed from the leading membar
2259       x = x->in(MemNode::Memory);
2260     } else {
2261       // the merge should get its Bottom mem feed from the leading membar
2262       x = mm->in(Compile::AliasIdxBot);
2263     }
2264 
2265     // ensure this is a non control projection
2266     if (!x->is_Proj() || x->is_CFG()) {
2267       return NULL;
2268     }
2269     // if it is fed by a membar that's the one we want
2270     x = x->in(0);
2271 
2272     if (!x->is_MemBar()) {
2273       return NULL;
2274     }
2275 
2276     MemBarNode *leading = x->as_MemBar();
2277     // reject invalid candidates
2278     if (!leading_membar(leading)) {
2279       return NULL;
2280     }
2281 
2282     // ok, we have a leading membar, now for the sanity clauses
2283 
2284     // the leading membar must feed Mem to a releasing store or CAS
2285     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2286     StoreNode *st = NULL;
2287     LoadStoreNode *cas = NULL;
2288     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2289       x = mem->fast_out(i);
2290       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2291         // two stores or CASes is one too many
2292         if (st != NULL || cas != NULL) {
2293           return NULL;
2294         }
2295         st = x->as_Store();
2296       } else if (is_CAS(x->Opcode())) {
2297         if (st != NULL || cas != NULL) {
2298           return NULL;
2299         }
2300         cas = x->as_LoadStore();
2301       }
2302     }
2303 
2304     // we should not have both a store and a cas
2305     if (st == NULL & cas == NULL) {
2306       return NULL;
2307     }
2308 
2309     if (st == NULL) {
2310       // nothing more to check
2311       return leading;
2312     } else {
2313       // we should not have a store if we started from an acquire
2314       if (is_cas) {
2315         return NULL;
2316       }
2317 
2318       // the store should feed the merge we used to get here
2319       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2320         if (st->fast_out(i) == mm) {
2321           return leading;
2322         }
2323       }
2324     }
2325 
2326     return NULL;
2327   }
2328 
2329   // card_mark_to_leading
2330   //
2331   // graph traversal helper which traverses from a card mark volatile
2332   // membar to a leading membar i.e. it ensures that the following Mem
2333   // flow subgraph is present.
2334   //
2335   //    MemBarRelease {leading}
2336   //   {MemBarCPUOrder} {optional}
2337   //         |   . . .
2338   //     Bot |   /
2339   //      MergeMem
2340   //         |
2341   //     MemBarVolatile (card mark)
2342   //        |     \
2343   //      . . .   StoreCM
2344   //
2345   // if the configuration is present returns the cpuorder member for
2346   // preference or when absent the release membar otherwise NULL.
2347   //
  // n.b. the input membar is expected to be a MemBarVolatile and must
2349   // be a card mark membar.
2350 
2351   MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
2352   {
2353     // input must be a card mark volatile membar
2354     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2355 
2356     // the Mem feed to the membar should be a merge
2357     Node *x = barrier->in(TypeFunc::Memory);
2358     if (!x->is_MergeMem()) {
2359       return NULL;
2360     }
2361 
2362     MergeMemNode *mm = x->as_MergeMem();
2363 
2364     x = mm->in(Compile::AliasIdxBot);
2365 
2366     if (!x->is_MemBar()) {
2367       return NULL;
2368     }
2369 
2370     MemBarNode *leading = x->as_MemBar();
2371 
2372     if (leading_membar(leading)) {
2373       return leading;
2374     }
2375 
2376     return NULL;
2377   }
2378 
// predicate deciding whether an acquire membar can be elided because
// the matcher will plant a load-acquire instruction instead; barrier
// is the MemBarAcquire under consideration
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      // look through the DecodeN to the underlying load
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2486 
2487 bool needs_acquiring_load(const Node *n)
2488 {
2489   assert(n->is_Load(), "expecting a load");
2490   if (UseBarriersForVolatile) {
2491     // we use a normal load and a dmb
2492     return false;
2493   }
2494 
2495   LoadNode *ld = n->as_Load();
2496 
2497   if (!ld->is_acquire()) {
2498     return false;
2499   }
2500 
2501   // check if this load is feeding an acquire membar
2502   //
2503   //   LoadX[mo_acquire]
2504   //   {  |1   }
2505   //   {DecodeN}
2506   //      |Parms
2507   //   MemBarAcquire*
2508   //
2509   // where * tags node we were passed
2510   // and |k means input k
2511 
2512   Node *start = ld;
2513   Node *mbacq = NULL;
2514 
2515   // if we hit a DecodeNarrowPtr we reset the start node and restart
2516   // the search through the outputs
2517  restart:
2518 
2519   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2520     Node *x = start->fast_out(i);
2521     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2522       mbacq = x;
2523     } else if (!mbacq &&
2524                (x->is_DecodeNarrowPtr() ||
2525                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2526       start = x;
2527       goto restart;
2528     }
2529   }
2530 
2531   if (mbacq) {
2532     return true;
2533   }
2534 
2535   // now check for an unsafe volatile get
2536 
2537   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2538   //
2539   //     MemBarCPUOrder
2540   //        ||       \\
2541   //   MemBarAcquire* LoadX[mo_acquire]
2542   //        ||
2543   //   MemBarCPUOrder
2544 
2545   MemBarNode *membar;
2546 
2547   membar = parent_membar(ld);
2548 
2549   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2550     return false;
2551   }
2552 
2553   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2554 
2555   membar = child_membar(membar);
2556 
2557   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2558     return false;
2559   }
2560 
2561   membar = child_membar(membar);
2562 
2563   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2564     return false;
2565   }
2566 
2567   return true;
2568 }
2569 
2570 bool unnecessary_release(const Node *n)
2571 {
2572   assert((n->is_MemBar() &&
2573           n->Opcode() == Op_MemBarRelease),
2574          "expecting a release membar");
2575 
2576   if (UseBarriersForVolatile) {
2577     // we need to plant a dmb
2578     return false;
2579   }
2580 
2581   // if there is a dependent CPUOrder barrier then use that as the
2582   // leading
2583 
2584   MemBarNode *barrier = n->as_MemBar();
2585   // check for an intervening cpuorder membar
2586   MemBarNode *b = child_membar(barrier);
2587   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2588     // ok, so start the check from the dependent cpuorder barrier
2589     barrier = b;
2590   }
2591 
2592   // must start with a normal feed
2593   MemBarNode *trailing = leading_to_trailing(barrier);
2594 
2595   return (trailing != NULL);
2596 }
2597 
2598 bool unnecessary_volatile(const Node *n)
2599 {
2600   // assert n->is_MemBar();
2601   if (UseBarriersForVolatile) {
2602     // we need to plant a dmb
2603     return false;
2604   }
2605 
2606   MemBarNode *mbvol = n->as_MemBar();
2607 
2608   // first we check if this is part of a card mark. if so then we have
2609   // to generate a StoreLoad barrier
2610 
2611   if (is_card_mark_membar(mbvol)) {
2612       return false;
2613   }
2614 
2615   // ok, if it's not a card mark then we still need to check if it is
2616   // a trailing membar of a volatile put graph.
2617 
2618   return (trailing_to_leading(mbvol) != NULL);
2619 }
2620 
2621 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2622 
2623 bool needs_releasing_store(const Node *n)
2624 {
2625   // assert n->is_Store();
2626   if (UseBarriersForVolatile) {
2627     // we use a normal store and dmb combination
2628     return false;
2629   }
2630 
2631   StoreNode *st = n->as_Store();
2632 
2633   // the store must be marked as releasing
2634   if (!st->is_release()) {
2635     return false;
2636   }
2637 
2638   // the store must be fed by a membar
2639 
2640   Node *x = st->lookup(StoreNode::Memory);
2641 
2642   if (! x || !x->is_Proj()) {
2643     return false;
2644   }
2645 
2646   ProjNode *proj = x->as_Proj();
2647 
2648   x = proj->lookup(0);
2649 
2650   if (!x || !x->is_MemBar()) {
2651     return false;
2652   }
2653 
2654   MemBarNode *barrier = x->as_MemBar();
2655 
2656   // if the barrier is a release membar or a cpuorder mmebar fed by a
2657   // release membar then we need to check whether that forms part of a
2658   // volatile put graph.
2659 
2660   // reject invalid candidates
2661   if (!leading_membar(barrier)) {
2662     return false;
2663   }
2664 
2665   // does this lead a normal subgraph?
2666   MemBarNode *trailing = leading_to_trailing(barrier);
2667 
2668   return (trailing != NULL);
2669 }
2670 
2671 // predicate controlling translation of CAS
2672 //
2673 // returns true if CAS needs to use an acquiring load otherwise false
2674 
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit dmb barriers are planted instead, so the CAS can use a
    // plain (non-acquiring) load exclusive
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  // debug-only verification of the expected CAS subgraph shape:
  // MemBarRelease -> MemBarCPUOrder -> (memory Proj) -> CAS, with a
  // trailing MemBarAcquire reachable via leading_to_trailing
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_trailing(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2719 
2720 // predicate controlling translation of StoreCM
2721 //
2722 // returns true if a StoreStore must precede the card write otherwise
2723 // false
2724 
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking. Any other occurrence will happen when
  // performing a card mark using CMS with conditional card marking or
  // G1. In those cases the preceding MemBarVolatile will be
  // translated to a dmb ish which guarantees visibility of the
  // preceding StoreN/P before this StoreCM

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then we must
  // insert the dmb ishst
  // (n.b. this returns the same value as the fall-through below, so
  // the check only serves to document the reasoning)

  if (UseBarriersForVolatile) {
    return false;
  }

  // we must be using CMS without conditional card marking so we have
  // to generate the StoreStore

  return false;
}
2753 
2754 
2755 #define __ _masm.
2756 
2757 // advance declarations for helper functions to convert register
2758 // indices to register objects
2759 
2760 // the ad file has to provide implementations of certain methods
2761 // expected by the generic code
2762 //
2763 // REQUIRED FUNCTIONALITY
2764 
2765 //=============================================================================
2766 
2767 // !!!!! Special hack to get all types of calls to specify the byte offset
2768 //       from the start of the call to the point where the return address
2769 //       will point.
2770 
2771 int MachCallStaticJavaNode::ret_addr_offset()
2772 {
2773   // call should be a simple bl
2774   int off = 4;
2775   return off;
2776 }
2777 
2778 int MachCallDynamicJavaNode::ret_addr_offset()
2779 {
2780   return 16; // movz, movk, movk, bl
2781 }
2782 
2783 int MachCallRuntimeNode::ret_addr_offset() {
2784   // for generated stubs the call will be
2785   //   far_call(addr)
2786   // for real runtime callouts it will be six instructions
2787   // see aarch64_enc_java_to_runtime
2788   //   adr(rscratch2, retaddr)
2789   //   lea(rscratch1, RuntimeAddress(addr)
2790   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2791   //   blrt rscratch1
2792   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2793   if (cb) {
2794     return MacroAssembler::far_branch_size();
2795   } else {
2796     return 6 * NativeInstruction::instruction_size;
2797   }
2798 }
2799 
2800 // Indicate if the safepoint node needs the polling page as an input
2801 
2802 // the shared code plants the oop data at the start of the generated
2803 // code for the safepoint node and that needs ot be at the load
2804 // instruction itself. so we cannot plant a mov of the safepoint poll
2805 // address followed by a load. setting this to true means the mov is
2806 // scheduled as a prior instruction. that's better for scheduling
2807 // anyway.
2808 
bool SafePointNode::needs_polling_address_input()
{
  // true: the polling page address is an explicit input so the mov
  // materializing it is scheduled as a prior instruction and the oop
  // map lands on the load itself
  return true;
}
2813 
2814 //=============================================================================
2815 
2816 #ifndef PRODUCT
// debug listing for a breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
2820 #endif
2821 
// emit a breakpoint as a brk #0 instruction
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
2826 
// size is computed generically from the emitted code
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2830 
2831 //=============================================================================
2832 
2833 #ifndef PRODUCT
  // debug listing: show how many bytes of nop padding are emitted
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
2837 #endif
2838 
2839   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2840     MacroAssembler _masm(&cbuf);
2841     for (int i = 0; i < _count; i++) {
2842       __ nop();
2843     }
2844   }
2845 
  // each nop is one machine instruction
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2849 
2850 //=============================================================================
// the constant base node produces no value in a register on aarch64
// (see the empty encoding below), so its output mask is empty
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2852 
// constants are addressed absolutely on aarch64, so no base offset
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
2856 
// no post-register-allocation expansion is needed for the constant base
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// never called: requires_postalloc_expand() returns false above
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
2861 
// nothing to emit: absolute addressing needs no base materialization
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
2865 
// empty encoding occupies zero bytes
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
2869 
2870 #ifndef PRODUCT
// debug listing for the (empty) constant base node
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
2874 #endif
2875 
2876 #ifndef PRODUCT
// debug listing of the prolog; mirrors the two frame-build strategies
// used by emit(): small frames use an immediate sub, large frames
// materialize the size in rscratch1 first
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frame: size fits the 9-bit scaled immediate form
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
2896 #endif
2897 
// emit the method prolog: patchable nop, optional stack bang, frame
// build, simulator notification and constant table base setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record where the frame becomes walkable for stack traversal
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2933 
// prolog size varies (bang, frame strategy), so measure emitted code
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
2939 
// the prolog contains no relocatable values
int MachPrologNode::reloc() const
{
  return 0;
}
2944 
2945 //=============================================================================
2946 
2947 #ifndef PRODUCT
// debug listing of the epilog; mirrors the three frame-pop strategies
// in emit() plus the optional return-polling page touch
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: size fits the 9-bit scaled immediate form
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
2971 #endif
2972 
// emit the method epilog: pop the frame, notify the simulator and
// touch the safepoint polling page on method return
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
2988 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
2993 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
2998 
// use the generic pipeline description for the epilog
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
3002 
3003 // This method seems to be obsolete. It is declared in machnode.hpp
3004 // and defined in all *.ad files, but it is never called. Should we
3005 // get rid of it?
// see the note above: declared in machnode.hpp but apparently unused
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3010 
3011 //=============================================================================
3012 
3013 // Figure out which register class each belongs in: rc_int, rc_float or
3014 // rc_stack.
3015 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3016 
3017 static enum RC rc_class(OptoReg::Name reg) {
3018 
3019   if (reg == OptoReg::Bad) {
3020     return rc_bad;
3021   }
3022 
3023   // we have 30 int registers * 2 halves
3024   // (rscratch1 and rscratch2 are omitted)
3025 
3026   if (reg < 60) {
3027     return rc_int;
3028   }
3029 
3030   // we have 32 float register * 2 halves
3031   if (reg < 60 + 128) {
3032     return rc_float;
3033   }
3034 
3035   // Between float regs & stack is the flags regs.
3036   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3037 
3038   return rc_stack;
3039 }
3040 
3041 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3042   Compile* C = ra_->C;
3043 
3044   // Get registers to move.
3045   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3046   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3047   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3048   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3049 
3050   enum RC src_hi_rc = rc_class(src_hi);
3051   enum RC src_lo_rc = rc_class(src_lo);
3052   enum RC dst_hi_rc = rc_class(dst_hi);
3053   enum RC dst_lo_rc = rc_class(dst_lo);
3054 
3055   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3056 
3057   if (src_hi != OptoReg::Bad) {
3058     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3059            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3060            "expected aligned-adjacent pairs");
3061   }
3062 
3063   if (src_lo == dst_lo && src_hi == dst_hi) {
3064     return 0;            // Self copy, no move.
3065   }
3066 
3067   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3068               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3069   int src_offset = ra_->reg2offset(src_lo);
3070   int dst_offset = ra_->reg2offset(dst_lo);
3071 
3072   if (bottom_type()->isa_vect() != NULL) {
3073     uint ireg = ideal_reg();
3074     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3075     if (cbuf) {
3076       MacroAssembler _masm(cbuf);
3077       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3078       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3079         // stack->stack
3080         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3081         if (ireg == Op_VecD) {
3082           __ unspill(rscratch1, true, src_offset);
3083           __ spill(rscratch1, true, dst_offset);
3084         } else {
3085           __ spill_copy128(src_offset, dst_offset);
3086         }
3087       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3088         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3089                ireg == Op_VecD ? __ T8B : __ T16B,
3090                as_FloatRegister(Matcher::_regEncode[src_lo]));
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3092         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3093                        ireg == Op_VecD ? __ D : __ Q,
3094                        ra_->reg2offset(dst_lo));
3095       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3096         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3097                        ireg == Op_VecD ? __ D : __ Q,
3098                        ra_->reg2offset(src_lo));
3099       } else {
3100         ShouldNotReachHere();
3101       }
3102     }
3103   } else if (cbuf) {
3104     MacroAssembler _masm(cbuf);
3105     switch (src_lo_rc) {
3106     case rc_int:
3107       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3108         if (is64) {
3109             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3110                    as_Register(Matcher::_regEncode[src_lo]));
3111         } else {
3112             MacroAssembler _masm(cbuf);
3113             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3114                     as_Register(Matcher::_regEncode[src_lo]));
3115         }
3116       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3117         if (is64) {
3118             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3119                      as_Register(Matcher::_regEncode[src_lo]));
3120         } else {
3121             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3122                      as_Register(Matcher::_regEncode[src_lo]));
3123         }
3124       } else {                    // gpr --> stack spill
3125         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3126         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3127       }
3128       break;
3129     case rc_float:
3130       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3131         if (is64) {
3132             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3133                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3134         } else {
3135             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3136                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3137         }
3138       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3139           if (cbuf) {
3140             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3141                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3142         } else {
3143             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3144                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3145         }
3146       } else {                    // fpr --> stack spill
3147         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3148         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3149                  is64 ? __ D : __ S, dst_offset);
3150       }
3151       break;
3152     case rc_stack:
3153       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3154         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3155       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3156         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3157                    is64 ? __ D : __ S, src_offset);
3158       } else {                    // stack --> stack copy
3159         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3160         __ unspill(rscratch1, is64, src_offset);
3161         __ spill(rscratch1, is64, dst_offset);
3162       }
3163       break;
3164     default:
3165       assert(false, "bad rc_class for spill");
3166       ShouldNotReachHere();
3167     }
3168   }
3169 
3170   if (st) {
3171     st->print("spill ");
3172     if (src_lo_rc == rc_stack) {
3173       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3174     } else {
3175       st->print("%s -> ", Matcher::regName[src_lo]);
3176     }
3177     if (dst_lo_rc == rc_stack) {
3178       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3179     } else {
3180       st->print("%s", Matcher::regName[dst_lo]);
3181     }
3182     if (bottom_type()->isa_vect() != NULL) {
3183       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3184     } else {
3185       st->print("\t# spill size = %d", is64 ? 64:32);
3186     }
3187   }
3188 
3189   return 0;
3190 
3191 }
3192 
3193 #ifndef PRODUCT
// debug listing: without a register allocator just name the copy,
// otherwise delegate to implementation() in format-only mode
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
3200 #endif
3201 
// emit the copy via implementation() in code-generating mode
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
3205 
// spill copy size varies; measure the emitted code
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
3209 
3210 //=============================================================================
3211 
3212 #ifndef PRODUCT
3213 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3214   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3215   int reg = ra_->get_reg_first(this);
3216   st->print("add %s, rsp, #%d]\t# box lock",
3217             Matcher::regName[reg], offset);
3218 }
3219 #endif
3220 
// compute the address of the box lock's stack slot into the output
// register as a single add of the frame offset to sp
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // the offset is expected to fit the add immediate form; anything
  // larger is treated as a fatal error
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}
3233 
// a single add instruction (see emit above)
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
3238 
3239 //=============================================================================
3240 
3241 #ifndef PRODUCT
3242 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3243 {
3244   st->print_cr("# MachUEPNode");
3245   if (UseCompressedClassPointers) {
3246     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3247     if (Universe::narrow_klass_shift() != 0) {
3248       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3249     }
3250   } else {
3251    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3252   }
3253   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3254   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3255 }
3256 #endif
3257 
// emit the unverified entry point: compare the receiver klass against
// the inline cache and jump to the ic miss stub on mismatch
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3271 
// size varies (far_jump may expand); measure the emitted code
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3276 
3277 // REQUIRED EMIT CODE
3278 
3279 //=============================================================================
3280 
3281 // Emit exception handler code.
// Emit exception handler code: a far jump to the exception blob.
// Returns the handler's offset in the stub section, or 0 when the
// code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3300 
3301 // Emit deopt handler code.
// Emit deopt handler code: capture the return address in lr, then
// far-jump to the deopt blob's unpack entry. Returns the handler's
// offset in the stub section, or 0 when the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr must point at this handler so the unpacker can identify it
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3321 
3322 // REQUIRED MATCHER CODE
3323 
3324 //=============================================================================
3325 
3326 const bool Matcher::match_rule_supported(int opcode) {
3327 
3328   // TODO
3329   // identify extra cases that we might want to provide match rules for
3330   // e.g. Op_StrEquals and other intrinsics
3331   if (!has_match_rule(opcode)) {
3332     return false;
3333   }
3334 
3335   return true;  // Per default match rules are supported.
3336 }
3337 
3338 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3339 
3340   // TODO
3341   // identify extra cases that we might want to provide match rules for
3342   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3343   bool ret_value = match_rule_supported(opcode);
3344   // Add rules here.
3345 
3346   return ret_value;  // Per default match rules are supported.
3347 }
3348 
// no predicated (masked) vector support on this port
const bool Matcher::has_predicated_vectors(void) {
  return false;
}
3352 
// use the generic float register pressure threshold unchanged
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
3356 
// not used on aarch64 (no x87-style fpu stack)
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3362 
3363 // Is this branch offset short enough that a short branch can be used?
3364 //
3365 // NOTE: If the platform does not provide any short branch variants, then
3366 //       this method should return false for offset 0.
3367 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3368   // The passed offset is relative to address of the branch.
3369 
3370   return (-32768 <= offset && offset < 32768);
3371 }
3372 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
3378 
3379 // true just means we have fast l2f conversion
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3383 
3384 // Vector width in bytes.
3385 const int Matcher::vector_width_in_bytes(BasicType bt) {
3386   int size = MIN2(16,(int)MaxVectorSize);
3387   // Minimum 2 values in vector
3388   if (size < 2*type2aelembytes(bt)) size = 0;
3389   // But never < 4
3390   if (size < 4) size = 0;
3391   return size;
3392 }
3393 
3394 // Limits on vector size (number of elements) loaded into vector.
// maximum element count is the byte width divided by the element size
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3398 const int Matcher::min_vector_size(const BasicType bt) {
3399 //  For the moment limit the vector size to 8 bytes
3400     int size = 8 / type2aelembytes(bt);
3401     if (size < 2) size = 2;
3402     return size;
3403 }
3404 
3405 // Vector ideal reg.
3406 const int Matcher::vector_ideal_reg(int len) {
3407   switch(len) {
3408     case  8: return Op_VecD;
3409     case 16: return Op_VecX;
3410   }
3411   ShouldNotReachHere();
3412   return 0;
3413 }
3414 
// vector shift counts always live in a full 128-bit register
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
3418 
3419 // AES support not yet implemented
// AES intrinsics do not need the original key passed through
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
3423 
// misaligned vector stores/loads are allowed unless the AlignVector
// flag forces alignment (comment originally copied from the x86 port)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3428 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing only works when the oop base is unshifted.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not expected to be called on AArch64: implicit-null fixup is not
// needed here, so reaching this is an error.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3506 
3507 // Return whether or not this register is ever used as an argument.
3508 // This function is used on startup to build the trampoline stubs in
3509 // generateOptoStub.  Registers not mentioned will be killed by the VM
3510 // call in the trampoline, and arguments in those registers not be
3511 // available to the callee.
3512 bool Matcher::can_be_java_arg(int reg)
3513 {
3514   return
3515     reg ==  R0_num || reg == R0_H_num ||
3516     reg ==  R1_num || reg == R1_H_num ||
3517     reg ==  R2_num || reg == R2_H_num ||
3518     reg ==  R3_num || reg == R3_H_num ||
3519     reg ==  R4_num || reg == R4_H_num ||
3520     reg ==  R5_num || reg == R5_H_num ||
3521     reg ==  R6_num || reg == R6_H_num ||
3522     reg ==  R7_num || reg == R7_H_num ||
3523     reg ==  V0_num || reg == V0_H_num ||
3524     reg ==  V1_num || reg == V1_H_num ||
3525     reg ==  V2_num || reg == V2_H_num ||
3526     reg ==  V3_num || reg == V3_H_num ||
3527     reg ==  V4_num || reg == V4_H_num ||
3528     reg ==  V5_num || reg == V5_H_num ||
3529     reg ==  V6_num || reg == V6_H_num ||
3530     reg ==  V7_num || reg == V7_H_num;
3531 }
3532 
// Any Java-argument register is also spillable.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Never expand long division by constant into inline assembly here.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.
// divmodI is not used on AArch64, so none of these projection masks
// should ever be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in the frame pointer register across a method-handle
// invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3568 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

// Counts the call's arguments into gpcnt/fpcnt and classifies the
// return type into one of the MacroAssembler::ret_type_* codes,
// writing the result through rtype.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so float/double arguments fall
      // through and ALSO increment the GP count -- confirm this
      // double-counting is intended for the simulator blrt encoding.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    // Anything other than void/float/double returns in a GP register.
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3610 
// Emit a volatile load/store INSN of REG at [BASE] after checking that
// the addressing mode is the plain base-register form (the only mode
// these volatile instructions support).  Deliberately NOT wrapped in
// do { } while (0): it introduces _masm into the enclosing scope so
// that following __ statements in the same enc_class can use it.
// NOTE(review): the SCRATCH parameter is currently unused.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3624 
3625   // Used for all non-volatile memory accesses.  The use of
3626   // $mem->opcode() to discover whether this pattern uses sign-extended
3627   // offsets is something of a kludge.
3628   static void loadStore(MacroAssembler masm, mem_insn insn,
3629                          Register reg, int opcode,
3630                          Register base, int index, int size, int disp)
3631   {
3632     Address::extend scale;
3633 
3634     // Hooboy, this is fugly.  We need a way to communicate to the
3635     // encoder that the index needs to be sign extended, so we have to
3636     // enumerate all the cases.
3637     switch (opcode) {
3638     case INDINDEXSCALEDOFFSETI2L:
3639     case INDINDEXSCALEDI2L:
3640     case INDINDEXSCALEDOFFSETI2LN:
3641     case INDINDEXSCALEDI2LN:
3642     case INDINDEXOFFSETI2L:
3643     case INDINDEXOFFSETI2LN:
3644       scale = Address::sxtw(size);
3645       break;
3646     default:
3647       scale = Address::lsl(size);
3648     }
3649 
3650     if (index == -1) {
3651       (masm.*insn)(reg, Address(base, disp));
3652     } else {
3653       if (disp == 0) {
3654         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3655       } else {
3656         masm.lea(rscratch1, Address(base, disp));
3657         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3658       }
3659     }
3660   }
3661 
3662   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3663                          FloatRegister reg, int opcode,
3664                          Register base, int index, int size, int disp)
3665   {
3666     Address::extend scale;
3667 
3668     switch (opcode) {
3669     case INDINDEXSCALEDOFFSETI2L:
3670     case INDINDEXSCALEDI2L:
3671     case INDINDEXSCALEDOFFSETI2LN:
3672     case INDINDEXSCALEDI2LN:
3673       scale = Address::sxtw(size);
3674       break;
3675     default:
3676       scale = Address::lsl(size);
3677     }
3678 
3679      if (index == -1) {
3680       (masm.*insn)(reg, Address(base, disp));
3681     } else {
3682       if (disp == 0) {
3683         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3684       } else {
3685         masm.lea(rscratch1, Address(base, disp));
3686         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3687       }
3688     }
3689   }
3690 
3691   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3692                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3693                          int opcode, Register base, int index, int size, int disp)
3694   {
3695     if (index == -1) {
3696       (masm.*insn)(reg, T, Address(base, disp));
3697     } else {
3698       assert(disp == 0, "unsupported address mode");
3699       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3700     }
3701   }
3702 
3703 %}
3704 
3705 
3706 
3707 //----------ENCODING BLOCK-----------------------------------------------------
3708 // This block specifies the encoding classes used by the compiler to
3709 // output byte streams.  Encoding classes are parameterized macros
3710 // used by Machine Instruction Nodes in order to generate the bit
3711 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
3715 // which returns its register number when queried.  CONST_INTER causes
3716 // an operand to generate a function which returns the value of the
3717 // constant when queried.  MEMORY_INTER causes an operand to generate
3718 // four functions which return the Base Register, the Index Register,
3719 // the Scale Value, and the Offset Value of the operand when queried.
3720 // COND_INTER causes an operand to generate six functions which return
3721 // the encoding code (ie - encoding bits for the instruction)
3722 // associated with each basic boolean condition for a conditional
3723 // instruction.
3724 //
3725 // Instructions specify two basic values for encoding.  Again, a
3726 // function is available to check if the constant displacement is an
3727 // oop. They use the ins_encode keyword to specify their encoding
3728 // classes (which must be a sequence of enc_class names, and their
3729 // parameters, specified in the encoding block), and they use the
3730 // opcode keyword to specify, in order, their primary, secondary, and
3731 // tertiary opcode.  Only the opcode sections which a particular
3732 // instruction needs for encoding need to be specified.
3733 encode %{
3734   // Build emit functions for each basic byte or larger field in the
3735   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3736   // from C++ code in the enc_class source block.  Emit functions will
3737   // live in the main source block for now.  In future, we can
3738   // generalize this by adding a syntax that specifies the sizes of
3739   // fields in an order, so that the adlc can build the emit functions
3740   // automagically
3741 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    // Stops the VM with a message if this encoding is ever emitted.
    __ unimplemented("C2 catch all");
  %}
3747 
  // BEGIN Non-volatile memory access

  // Each enc_class below forwards to the loadStore helper, which
  // selects the addressing form (base+disp, base+index, or
  // lea-then-index) from the memory operand's fields.

  // Sign- and zero-extending byte/halfword loads.
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Word and doubleword loads.
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar floating-point loads.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads, using the S, D and Q SIMD register variants.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3851 
  // Non-volatile stores; the *0 variants store the zero register.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero-byte store preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar floating-point stores.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores, using the S, D and Q SIMD register variants.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3944 
3945   // END Non-volatile memory access
3946 
  // volatile loads and stores

  // Store-release variants; MOV_VOLATILE checks that the operand uses
  // the plain base-register addressing mode.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
3963 
3964 
  // Load-acquire variants.  There is no sign-extending load-acquire
  // instruction, so the signed forms load with ldarb/ldarh and
  // sign-extend the result afterwards.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // FP load-acquire: load into a scratch GP register, then move the
  // bits into the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4039 
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // FP store-release: move the FP bits into a scratch GP register
  // first, then store-release from there.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4073 
  // synchronized read/update encodings

  // Load-acquire-exclusive.  ldaxr only takes a bare base register, so
  // any displacement/index is first folded into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4104 
  // Store-release-exclusive; the status flag lands in rscratch1.
  // Like ldaxr, only a bare base register is accepted, so composite
  // addresses are folded into rscratch2 with lea first.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // Compare the store-exclusive status against zero so that callers
    // can branch on the condition flags.
    __ cmpw(rscratch1, zr);
  %}
4134 
  // 64-bit compare-and-swap with release (but not acquire) semantics.
  // Only a bare base register is supported as the memory address.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true);
  %}

  // 32-bit variant of the above.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true);
  %}
4148 
4149 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true);
  %}

  // 32-bit variant of the above.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true);
  %}
4167 
4168 
  // auxiliary used for CompareAndSwapX to set result register:
  // res := 1 if the flags say EQ (swap succeeded), else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
4175 
  // prefetch encodings

  // Prefetch-for-store (PSTL1KEEP) of the resolved memory operand.
  // Handles the same addressing shapes as the load/store encodings:
  // base+disp, base+index<<scale, and base+disp+index<<scale.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // prfm has no base+disp+index form: fold the displacement into
        // rscratch1 first, then prefetch from rscratch1+index<<scale.
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4196 
  // mov encodings
4198 
4199   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4200     MacroAssembler _masm(&cbuf);
4201     u_int32_t con = (u_int32_t)$src$$constant;
4202     Register dst_reg = as_Register($dst$$reg);
4203     if (con == 0) {
4204       __ movw(dst_reg, zr);
4205     } else {
4206       __ movw(dst_reg, con);
4207     }
4208   %}
4209 
4210   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4211     MacroAssembler _masm(&cbuf);
4212     Register dst_reg = as_Register($dst$$reg);
4213     u_int64_t con = (u_int64_t)$src$$constant;
4214     if (con == 0) {
4215       __ mov(dst_reg, zr);
4216     } else {
4217       __ mov(dst_reg, con);
4218     }
4219   %}
4220 
  // Materialize a pointer constant.  Oop and metadata constants are
  // loaded with relocation-aware moves; unrelocated addresses in the
  // first page use a plain mov, anything else uses adrp+add.  NULL and
  // the special value 1 have dedicated encodings (aarch64_enc_mov_p0,
  // aarch64_enc_mov_p1) and must never reach this one.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          // adrp forms the page address; the add supplies the in-page offset.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4245 
4246   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4247     MacroAssembler _masm(&cbuf);
4248     Register dst_reg = as_Register($dst$$reg);
4249     __ mov(dst_reg, zr);
4250   %}
4251 
4252   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4253     MacroAssembler _masm(&cbuf);
4254     Register dst_reg = as_Register($dst$$reg);
4255     __ mov(dst_reg, (u_int64_t)1);
4256   %}
4257 
  // Load the address of the safepoint polling page with a poll_type
  // relocation.  The page is page-aligned, so the adrp in-page offset
  // must come back zero.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}
4266 
  // Load the card table byte_map_base (GC write-barrier base address).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4271 
  // Materialize a narrow (compressed) oop constant.  Always emitted
  // with an oop relocation; a null constant must use aarch64_enc_mov_n0.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4284 
4285   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4286     MacroAssembler _masm(&cbuf);
4287     Register dst_reg = as_Register($dst$$reg);
4288     __ mov(dst_reg, zr);
4289   %}
4290 
  // Materialize a narrow (compressed) klass constant.  Always emitted
  // with a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4303 
  // arithmetic encodings

  // Immediate add/subtract, 32-bit.  The instruct using this encoding
  // sets $primary to 0 for add and 1 for subtract; subtraction is folded
  // into a negated immediate so the sign test below always hands the
  // assembler a non-negative add/sub immediate.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
4319 
  // Immediate add/subtract, 64-bit.  Same primary-selects-subtract
  // scheme as aarch64_enc_addsubw_imm; immLAddSub limits the constant
  // to the add/sub immediate range, so the int32_t narrowing is safe.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4333 
4334   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4335     MacroAssembler _masm(&cbuf);
4336    Register dst_reg = as_Register($dst$$reg);
4337    Register src1_reg = as_Register($src1$$reg);
4338    Register src2_reg = as_Register($src2$$reg);
4339     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4340   %}
4341 
4342   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4343     MacroAssembler _masm(&cbuf);
4344    Register dst_reg = as_Register($dst$$reg);
4345    Register src1_reg = as_Register($src1$$reg);
4346    Register src2_reg = as_Register($src2$$reg);
4347     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4348   %}
4349 
4350   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4351     MacroAssembler _masm(&cbuf);
4352    Register dst_reg = as_Register($dst$$reg);
4353    Register src1_reg = as_Register($src1$$reg);
4354    Register src2_reg = as_Register($src2$$reg);
4355     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4356   %}
4357 
4358   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4359     MacroAssembler _masm(&cbuf);
4360    Register dst_reg = as_Register($dst$$reg);
4361    Register src1_reg = as_Register($src1$$reg);
4362    Register src2_reg = as_Register($src2$$reg);
4363     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4364   %}
4365 
4366   // compare instruction encodings
4367 
4368   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4369     MacroAssembler _masm(&cbuf);
4370     Register reg1 = as_Register($src1$$reg);
4371     Register reg2 = as_Register($src2$$reg);
4372     __ cmpw(reg1, reg2);
4373   %}
4374 
4375   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4376     MacroAssembler _masm(&cbuf);
4377     Register reg = as_Register($src1$$reg);
4378     int32_t val = $src2$$constant;
4379     if (val >= 0) {
4380       __ subsw(zr, reg, val);
4381     } else {
4382       __ addsw(zr, reg, -val);
4383     }
4384   %}
4385 
4386   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4387     MacroAssembler _masm(&cbuf);
4388     Register reg1 = as_Register($src1$$reg);
4389     u_int32_t val = (u_int32_t)$src2$$constant;
4390     __ movw(rscratch1, val);
4391     __ cmpw(reg1, rscratch1);
4392   %}
4393 
4394   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4395     MacroAssembler _masm(&cbuf);
4396     Register reg1 = as_Register($src1$$reg);
4397     Register reg2 = as_Register($src2$$reg);
4398     __ cmp(reg1, reg2);
4399   %}
4400 
  // 64-bit compare against a 12-bit-range immediate via subs/adds with
  // the zero register as destination (flags only).
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // Long.MIN_VALUE negates to itself, so it cannot go through adds;
      // materialize it in a scratch register and do a full compare.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4415 
4416   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4417     MacroAssembler _masm(&cbuf);
4418     Register reg1 = as_Register($src1$$reg);
4419     u_int64_t val = (u_int64_t)$src2$$constant;
4420     __ mov(rscratch1, val);
4421     __ cmp(reg1, rscratch1);
4422   %}
4423 
4424   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4425     MacroAssembler _masm(&cbuf);
4426     Register reg1 = as_Register($src1$$reg);
4427     Register reg2 = as_Register($src2$$reg);
4428     __ cmp(reg1, reg2);
4429   %}
4430 
4431   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4432     MacroAssembler _masm(&cbuf);
4433     Register reg1 = as_Register($src1$$reg);
4434     Register reg2 = as_Register($src2$$reg);
4435     __ cmpw(reg1, reg2);
4436   %}
4437 
4438   enc_class aarch64_enc_testp(iRegP src) %{
4439     MacroAssembler _masm(&cbuf);
4440     Register reg = as_Register($src$$reg);
4441     __ cmp(reg, zr);
4442   %}
4443 
4444   enc_class aarch64_enc_testn(iRegN src) %{
4445     MacroAssembler _masm(&cbuf);
4446     Register reg = as_Register($src$$reg);
4447     __ cmpw(reg, zr);
4448   %}
4449 
4450   enc_class aarch64_enc_b(label lbl) %{
4451     MacroAssembler _masm(&cbuf);
4452     Label *L = $lbl$$label;
4453     __ b(*L);
4454   %}
4455 
4456   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4457     MacroAssembler _masm(&cbuf);
4458     Label *L = $lbl$$label;
4459     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4460   %}
4461 
4462   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4463     MacroAssembler _masm(&cbuf);
4464     Label *L = $lbl$$label;
4465     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4466   %}
4467 
  // Slow-path subtype check: scans super_reg's secondary-supers list.
  // Falls through on a hit (after optionally zeroing the result when
  // $primary is set -- presumably for the VsZero variant; confirm
  // against the instructs using this encoding) and lands on `miss`
  // otherwise.  check_klass_subtype_slow_path sets the condition codes.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4485 
  // Static (or opt-virtual) Java call via a trampoline.  For real Java
  // targets we also emit the to-interpreter stub.  Any emission failure
  // (trampoline or stub) means the code cache is full: record a compile
  // failure and bail out.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      // Pick the relocation matching how the call site will be patched.
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4512 
  // Virtual Java call through an inline cache.  ic_call returns NULL if
  // the code cache cannot hold the call: record a compile failure.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4522 
  // Post-call epilog.  The VerifyStackAtCalls check is not implemented
  // on AArch64 yet; it traps if the flag is on.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack
      __ call_Unimplemented();
    }
  %}
4530 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target is outside the code cache: indirect call through rscratch1.
      // blrt needs the C calling-convention register counts for this
      // signature (gp/fp argument counts and return type).
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4561 
  // Re-dispatch a pending exception via the rethrow stub (far jump so
  // the stub may be anywhere in the code cache).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4566 
  // Method return: branch to the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4571 
4572   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4573     MacroAssembler _masm(&cbuf);
4574     Register target_reg = as_Register($jump_target$$reg);
4575     __ br(target_reg);
4576   %}
4577 
  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4587 
  // Fast-path monitor enter.  Attempts biased locking (if enabled),
  // then a stack-lock CAS on the object's markOop, then a recursive-lock
  // check, then a CAS on an inflated monitor's owner field.  On exit
  // the flags encode the outcome: EQ == locked, NE == take the slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this compare forces NE (failure) and
      // the caller always falls into the runtime path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // disp_hdr now holds the expected (unlocked) markOop for the CAS below.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      Label retry_load;
      __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);        // stlxr writes 0 on success
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4740 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Fast-path monitor exit: undoes biased/stack/inflated locking in the
  // reverse order of fast_lock.  On exit the flags encode the outcome:
  // EQ == unlocked, NE == take the slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // Release-only CAS (casl, not casal): no acquire needed on unlock.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        Label retry_load;
        __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);      // stlxr writes 0 on success
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp sets the flags the cont path reports; cbnz takes the
      // NE (contended => slow path) case to cont.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4838 
4839 %}
4840 
4841 //----------FRAME--------------------------------------------------------------
4842 // Definition of frame structure and management information.
4843 //
4844 //  S T A C K   L A Y O U T    Allocators stack-slot number
4845 //                             |   (to get allocators register number
4846 //  G  Owned by    |        |  v    add OptoReg::stack0())
4847 //  r   CALLER     |        |
4848 //  o     |        +--------+      pad to even-align allocators stack-slot
4849 //  w     V        |  pad0  |        numbers; owned by CALLER
4850 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4851 //  h     ^        |   in   |  5
4852 //        |        |  args  |  4   Holes in incoming args owned by SELF
4853 //  |     |        |        |  3
4854 //  |     |        +--------+
4855 //  V     |        | old out|      Empty on Intel, window on Sparc
4856 //        |    old |preserve|      Must be even aligned.
4857 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4858 //        |        |   in   |  3   area for Intel ret address
4859 //     Owned by    |preserve|      Empty on Sparc.
4860 //       SELF      +--------+
4861 //        |        |  pad2  |  2   pad to align old SP
4862 //        |        +--------+  1
4863 //        |        | locks  |  0
4864 //        |        +--------+----> OptoReg::stack0(), even aligned
4865 //        |        |  pad1  | 11   pad to align new SP
4866 //        |        +--------+
4867 //        |        |        | 10
4868 //        |        | spills |  9   spills
4869 //        V        |        |  8   (pad0 slot for callee)
4870 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4871 //        ^        |  out   |  7
4872 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4873 //     Owned by    +--------+
4874 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4875 //        |    new |preserve|      Must be even-aligned.
4876 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4877 //        |        |        |
4878 //
4879 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4880 //         known from SELF's arguments and the Java calling convention.
4881 //         Region 6-7 is determined per call site.
4882 // Note 2: If the calling convention leaves holes in the incoming argument
4883 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
4885 //         incoming area, as the Java calling convention is completely under
4886 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4888 //         varargs C calling conventions.
4889 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4890 //         even aligned with pad0 as needed.
4891 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4892 //           (the latter is true on Intel but is it false on AArch64?)
4893 //         region 6-11 is even aligned; it may be padded out more so that
4894 //         the region from SP to FP meets the minimum stack alignment.
4895 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4896 //         alignment.  Region 11, pad1, may be dynamically extended so that
4897 //         SP meets the minimum alignment.
4898 
4899 frame %{
4900   // What direction does stack grow in (assumed to be same for C & Java)
4901   stack_direction(TOWARDS_LOW);
4902 
4903   // These three registers define part of the calling convention
4904   // between compiled code and the interpreter.
4905 
4906   // Inline Cache Register or methodOop for I2C.
4907   inline_cache_reg(R12);
4908 
4909   // Method Oop Register when calling interpreter.
4910   interpreter_method_oop_reg(R12);
4911 
4912   // Number of stack slots consumed by locking an object
4913   sync_stack_slots(2);
4914 
4915   // Compiled code's Frame Pointer
4916   frame_pointer(R31);
4917 
4918   // Interpreter stores its frame pointer in a register which is
4919   // stored to the stack by I2CAdaptors.
4920   // I2CAdaptors convert from interpreted java to compiled java.
4921   interpreter_frame_pointer(R29);
4922 
4923   // Stack alignment requirement
4924   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
4925 
4926   // Number of stack slots between incoming argument block and the start of
4927   // a new frame.  The PROLOG must add this many slots to the stack.  The
4928   // EPILOG must remove this many slots. aarch64 needs two slots for
4929   // return address and fp.
4930   // TODO think this is correct but check
4931   in_preserve_stack_slots(4);
4932 
4933   // Number of outgoing stack slots killed above the out_preserve_stack_slots
4934   // for calls to C.  Supports the var-args backing area for register parms.
4935   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
4936 
4937   // The after-PROLOG location of the return address.  Location of
4938   // return address specifies a type (REG or STACK) and a number
4939   // representing the register number (i.e. - use a register name) or
4940   // stack slot.
4941   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
4942   // Otherwise, it is above the locks and verification slot and alignment word
4943   // TODO this may well be correct but need to check why that - 2 is there
4944   // ppc port uses 0 but we definitely need to allow for fixed_slots
4945   // which folds in the space used for monitors
4946   return_addr(STACK - 2 +
4947               round_to((Compile::current()->in_preserve_stack_slots() +
4948                         Compile::current()->fixed_slots()),
4949                        stack_alignment_in_slots()));
4950 
  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // Java-to-Java calling convention: delegated to the shared runtime.
  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Native (C) calling convention, used for runtime and JNI calls.
  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}
4969 
  // Location of compiled Java return values.  Same as C for now.
  // Integer/pointer results come back in R0, floating-point results in V0.
  // The lo/hi tables give the OptoReg holding the low and high halves of
  // the value for each ideal register type; hi is OptoReg::Bad for 32-bit
  // values, which occupy a single register half.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    // NOTE(review): the assert below rejects Op_RegN even though both
    // tables carry an Op_RegN row -- confirm whether narrow-oop returns
    // are intended to be allowed here.
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
5001 %}
5002 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
// Default operand cost; individual operands override this with op_cost(...).
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5020 
5021 //----------OPERANDS-----------------------------------------------------------
5022 // Operand definitions must precede instruction definitions for correct parsing
5023 // in the ADLC because operands constitute user defined types which are used in
5024 // instruction definitions.
5025 
5026 //----------Simple Operands----------------------------------------------------
5027 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low-byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low-halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5181 
// Constant 63.
// NOTE(review): despite the "L" in the name this operand matches ConI and
// reads get_int(); presumably shift/mask amounts feeding long operations
// appear as int constants in the ideal graph -- confirm against users.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant 255.
// NOTE(review): matches ConI / get_int() like immL_63 above -- confirm
// this is intentional rather than a ConL/get_long() operand.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5201 
// 64 bit constant 65535 (low-halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low-word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit contiguous low-order bitmask: value of the form 2^k - 1
// with the top two bits clear.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit contiguous low-order bitmask: value of the form 2^k - 1
// with the top two bits clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5243 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores (long variant)
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5318 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5340 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5427 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// matches only the address of the VM's safepoint polling page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// matches only the card-table base used by the GC write barrier
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5509 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit-pattern compare, so -0.0d does not match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate encodable in an FMOV (immediate) instruction.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit-pattern compare, so -0.0f does not match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate encodable in an FMOV (immediate) instruction.
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5570 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5601 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike the sibling operands this one declares no op_cost,
// so it takes the default from "op_attrib op_cost(1)" -- confirm intended.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}
5644 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5783 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5828 
5829 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5862 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-sized) vector register
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5942 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5982 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter method-oop register (same register class as the inline cache)
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6024 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER descriptions below index(0xffffffff) encodes
// "no index register" (only base plus displacement is used).

// base register indirect: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + scaled long index + unsigned 12-bit int offset
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + scaled long index + unsigned 12-bit long offset
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + sign-extended int index + offset
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// base + scaled, sign-extended int index + offset
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// base + scaled, sign-extended int index (no offset)
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + scaled long index (no offset)
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + long index (no scale, no offset)
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + immediate int offset
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + immediate long offset
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6166 
6167 
// Narrow-oop variants of the memory operands above.  Each one matches a
// DecodeN of the narrow base and is only legal when narrow_oop_shift()
// is 0, i.e. when decoding a narrow oop needs no shift.

// narrow base register indirect: [reg]
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + scaled long index + unsigned 12-bit int offset
// NOTE(review): op_cost(0) here differs from the INSN_COST used both by
// the plain indIndexScaledOffsetI and the LN variant below -- confirm.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// narrow base + scaled long index + unsigned 12-bit long offset
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// narrow base + sign-extended int index + offset
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + scaled, sign-extended int index + offset
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// narrow base + scaled, sign-extended int index (no offset)
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + scaled long index (no offset)
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + long index (no scale, no offset)
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + immediate int offset
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + immediate long offset
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6317 
6318 
6319 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Memory operand addressing thread-local state: thread register plus the
// fixed pc-slot offset (immL_pc_off).
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6334 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
//
// All five variants encode [SP + slot-offset]: base 0x1e is the stack
// pointer encoding, there is no index register, and the slot number is
// carried in the displacement.

// Pointer-sized stack slot.
// NOTE(review): this is the only stackSlot operand with an explicit
// op_cost(100); the I/F/D/L variants below have none -- confirm whether
// that asymmetry is intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// 32-bit int stack slot.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Single-precision float stack slot.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double-precision float stack slot.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// 64-bit long stack slot.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6409 
6410 // Operands for expressing Control Flow
6411 // NOTE: Label is a predefined operand which should not be redefined in
6412 //       the AD file. It is generically handled within the ADLC.
6413 
6414 //----------Conditional Branch Operands----------------------------------------
6415 // Comparison Op  - This is the operation of the comparison, and is limited to
6416 //                  the following set of codes:
6417 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6418 //
6419 // Other attributes of the comparison, such as unsignedness, are specified
6420 // by the comparison instruction that sets a condition code flags register.
6421 // That result is represented by a flags operand whose subtype is appropriate
6422 // to the unsignedness (etc.) of the comparison.
6423 //
6424 // Later, the instruction which matches both the Comparison Op (a Bool) and
6425 // the flags (produced by the Cmp) specifies the coding of the comparison op
6426 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6427 
// used for signed integral comparisons and fp comparisons

// Condition operand: the hex values are AArch64 condition-field encodings
// (matching the mnemonic given alongside each), consumed by the encoders
// of the branch/csel instructions that take this operand.
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");           // signed less-than
    greater_equal(0xa, "ge");  // signed greater-or-equal
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

// Same as cmpOp but the ordered relations use the unsigned condition
// codes (lo/hs/ls/hi) instead of the signed ones (lt/ge/le/gt).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");           // unsigned lower
    greater_equal(0x2, "hs");  // unsigned higher-or-same
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6465 
// Special operand allowing long args to int ops to be truncated for free

// Matches (ConvL2I reg) as a plain register operand (REG_INTER), so a
// 32-bit instruction can consume the low half of a long register without
// an explicit truncation instruction; op_cost(0) makes the match free.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
6478 
// vmem: the subset of memory operand forms accepted by the vector
// load/store pipeline classes (see vload_reg_mem*/vstore_reg_mem* below).
opclass vmem(indirect, indIndex, indOffI, indOffL);
6480 
6481 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
6483 // instruction definitions by not requiring the AD writer to specify
6484 // separate instructions for every form of operand when the
6485 // instruction accepts multiple operand types with the same basic
6486 // encoding and format. The classic case of this is memory operands.
6487 
// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address.
// The first row lists the plain-pointer forms, the second row the
// corresponding narrow-oop (…N) forms defined above.
opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
6493 
6494 
6495 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6496 // operations. it allows the src to be either an iRegI or a (ConvL2I
6497 // iRegL). in the latter case the l2i normally planted for a ConvL2I
6498 // can be elided because the 32-bit instruction will just employ the
6499 // lower 32 bits anyway.
6500 //
6501 // n.b. this does not elide all L2I conversions. if the truncated
6502 // value is consumed by more than one operation then the ConvL2I
6503 // cannot be bundled into the consuming nodes so an l2i gets planted
6504 // (actually a movw $dst $src) and the downstream instructions consume
6505 // the result of the l2i as an iRegI input. That's a shame since the
6506 // movw is actually redundant but its not too costly.
6507 
// Accept either a plain int register or a free long->int truncation
// (see the explanatory comment above).
opclass iRegIorL2I(iRegI, iRegL2I);
6509 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Alias the named A53-style stages onto the generic six-stage
// pipe_desc(S0..S5) declared below: issue, execute-1, execute-2, write.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6519 
6520 // Integer ALU reg operation
6521 pipeline %{
6522 
// Bundle/issue attributes: models a dual-issue core with fixed-size
// 32-bit instructions fetched in 64-byte lines.
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6535 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 = either issue slot; ALU = either integer ALU; the remaining
// units (multiply-accumulate, divide, branch, load/store, NEON/FP) are
// single instances.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
6550 
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
// (S0..S3 are aliased as ISS/EX1/EX2/WR above)
pipe_desc(S0, S1, S2, S3, S4, S5);
6556 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.
//
// FP/NEON classes: arithmetic reads sources early (S1/S2) and produces
// the result in S5; INS01 classes may issue in either slot, INS0-only
// classes can issue only in slot 0.

// FP two-source op, single precision
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-source op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-source op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP one-source op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> float conversion
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double conversion
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int conversion
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long conversion
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float conversion
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float conversion
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int conversion
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long conversion
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double conversion
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double conversion
// NOTE(review): src is iRegIorL2I here while fp_l2f above uses iRegL --
// possibly a copy/paste slip; harmless for scheduling but worth confirming.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision (issue slot 0 only)
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision (issue slot 0 only)
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: also reads the flags
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, single precision (no source operands)
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP immediate move, double precision
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6762 
// Vector (NEON) pipeline classes.  Convention throughout: the *64
// variants operate on vecD (64-bit) registers and may issue in either
// slot (INS01); the *128 variants operate on vecX (128-bit) registers
// and are generally restricted to issue slot 0 (INS0).

// 64-bit vector multiply
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector multiply-accumulate: dst is also read (accumulator input)
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply-accumulate
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector integer two-source op
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// 128-bit vector integer two-source op
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// 64-bit vector logical op
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector logical op
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by register (shift counts live in a vecX register)
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by register
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by immediate
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by immediate
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector FP two-source op
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP two-source op
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP multiply/divide (slot 0 only, unlike other 64-bit ops)
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP multiply/divide
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP square root
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP one-source op
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP one-source op
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// duplicate general register into 64-bit vector
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// duplicate general register into 128-bit vector
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// duplicate float register into 64-bit vector
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// duplicate float register into 128-bit vector
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// duplicate double register into 128-bit vector
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// vector move-immediate, 64-bit
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// vector move-immediate, 128-bit
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector load
pipe_class vload_reg_mem64(vecD dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector load
pipe_class vload_reg_mem128(vecX dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector store
pipe_class vstore_reg_mem64(vecD src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7037 
// 128-bit vector store.
// Fix: the source was declared vecD (copy/paste from the 64-bit variant
// above); every other *128 pipeline class here (vload_reg_mem128,
// vmul128, vdop128, ...) takes vecX, so use vecX for consistency.
// Instructions reference this class by name via ins_pipe, so the formal
// operand type change is not caller-visible.
pipe_class vstore_reg_mem128(vecX src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7046 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read); // shifted operand read early
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 yet the ALU resource is claimed in
// EX1 (other EX2-result classes claim ALU : EX2) -- confirm intentional.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7144 
//------- Compare operation -------------------------------

// Compare reg-reg: writes the flags register in EX2
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7171 
//------- Conditional instructions ------------------------
// All read the flags in EX1 and produce the result in EX2.

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7209 
//------- Multiply pipeline operations --------------------
// All use the MAC unit; sources are read at issue, result written in WR.

// 32-bit multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 32-bit multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7262 
//------- Divide pipeline operations --------------------
// Divides occupy the DIV unit and can only issue in slot 0.

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7288 
//------- Load pipeline operations ------------------------
// Loads use the LDST unit; the address is consumed at issue.

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7322 
//------- Store pipeline operations -----------------------
// Stores use the LDST unit; the address is consumed at issue and the
// data value is read in EX2.

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// NOTE: despite its name, 'dst' is the address register and is only read.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7356 
//------- Branch pipeline operations ----------------------
// (Section header corrected; this group covers control transfer, not stores.)

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch: reads the flags in EX1
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7385 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class: zero-latency placeholder (used for MachNop below)
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}
7416 
// Generic catch-all classes: fixed latencies only, no per-stage detail.

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
7444 
// Define the class for the Nop node: nops schedule as the empty class.
define %{
   MachNop = pipe_class_empty;
%}
7449 
7450 %}
7451 //----------INSTRUCTIONS-------------------------------------------------------
7452 //
7453 // match      -- States which machine-independent subtree may be replaced
7454 //               by this instruction.
7455 // ins_cost   -- The estimated cost of this instruction is used by instruction
7456 //               selection to identify a minimum cost tree of machine
7457 //               instructions that matches a tree of machine-independent
7458 //               instructions.
7459 // format     -- A string providing the disassembly for this instruction.
7460 //               The value of an instruction's operand may be inserted
7461 //               by referring to it with a '$' prefix.
7462 // opcode     -- Three instruction opcodes may be provided.  These are referred
7463 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7465 //               indicate the type of machine instruction, while secondary
7466 //               and tertiary are often used for prefix options or addressing
7467 //               modes.
7468 // ins_encode -- A list of encode classes with parameters. The encode class
7469 //               name must have been defined in an 'enc_class' specification
7470 //               in the encode section of the architecture description.
7471 
7472 // ============================================================================
7473 // Memory (Load/Store) Instructions
7474 
7475 // Load Instructions
7476 
// Load Byte (8 bit signed)
//
// NOTE for all the plain (relaxed) load rules below: the
// !needs_acquiring_load(n) predicate excludes loads that must carry
// acquire semantics; those are matched by the ldar-based "_volatile"
// rules later in this file.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n is the ConvI2L; the load is its input.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrb zero-extends to 64 bits, so no separate widening is needed.
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrsw sign-extends the loaded 32-bit value to 64 bits.
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // Dig down through AndL -> ConvI2L to reach the actual load node.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  // ldrw zero-extends, which implements the AndL with the 32-bit mask.
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7630 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  // Relaxed load only; acquiring loads match loadL_volatile.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed disassembly annotation: this is a 64-bit load, the comment
  // previously said "# int".
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7644 
// Load Range
// Loads an array length / range check value. NOTE(review): no
// needs_acquiring_load predicate here, presumably because a LoadRange
// never requires acquire semantics — confirm against the volatile rules.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
// FP loads use pipe_class_memory rather than iload_reg_mem.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7741 
7742 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Cost reflects that materializing an arbitrary pointer may take a
// multi-instruction mov sequence.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7798 
7799 // Load Pointer Constant One
7800 
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed disassembly annotation: this rule materializes the constant
  // pointer value 1 (immP_1); it previously said "# NULL ptr",
  // a copy/paste from loadConP0.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7812 
// Load Poll Page Constant
// Uses a pc-relative adr to form the safepoint polling page address.
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant
// immFPacked is a float expressible as an fmov immediate, so a single
// fmovs suffices (no constant-pool load).
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // fmovs takes its immediate as a double here.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: load the float from the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant
// immDPacked is a double expressible as an fmov immediate.
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
7926 
7927 // Load Double Constant
7928 
// General case: load the double from the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed disassembly annotation: the constant is a double; it
  // previously said "float=$con" (copy/paste from loadConF).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7943 
7944 // Store Instructions
7945 
// Store CMS card-mark Immediate
// Only selected when the StoreStore barrier has been proven
// unnecessary (see unnecessary_storestore); otherwise the _ordered
// variant below is used.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// NOTE for all the plain store rules below: !needs_releasing_store(n)
// excludes stores that must carry release semantics; those are matched
// by the stlr-based "_volatile" rules later in this file.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7988 
7989 
// Store zero Byte.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly annotation: the encoding stores the zero
  // register; the format previously showed a misspelled
  // "rscractch2", which is not the register used.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8002 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Char/Short.
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Integer.
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8057 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly annotation: this is a 64-bit store, the comment
  // previously said "# int".
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
8071 
// Store zero Long (64 bit).
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly annotation: 64-bit store; previously said "# int".
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8085 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store a compressed NULL by storing rheapbase, which is only valid
// when both narrow oop and narrow klass bases are NULL so that
// rheapbase holds zero (as the format comment below notes).
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // predicate listed before match here; clause order is not
  // significant in ADL.
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8204 
8205 //  ---------------- volatile loads and stores ----------------
8206 
// Load Byte (8 bit signed)
// NOTE for all "_volatile" loads: the operand is a plain indirect
// (base register only, no offset/index) because the ldar family of
// instructions accepts only a base-register address.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8296 
// Load Short (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly annotation: the encoding emits the
  // sign-extending ldarsh; the format previously showed the
  // zero-extending ldarh, which did not match the encoding.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8309 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends, which implements the AndL with the 32-bit mask.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8335 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly annotation: 64-bit load; previously said "# int".
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8348 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Store Byte
// NOTE for all "_volatile" stores: stlr provides the required release
// semantics, so no needs_releasing_store predicate is used here.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8440 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly annotation: 64-bit store; previously said "# int".
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8453 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8508 
8509 //  ---------------- end of volatile loads and stores ----------------
8510 
8511 // ============================================================================
8512 // BSWAP Instructions
8513 
// Reverse the bytes of a 32-bit value (Integer.reverseBytes).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a 64-bit value (Long.reverseBytes).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of an unsigned 16-bit value (Character.reverseBytes).
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a signed 16-bit value (Short.reverseBytes).
// rev16w swaps the bytes; sbfmw then sign-extends bits 0..15 into the
// full 32-bit result.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8567 
8568 // ============================================================================
8569 // Zero Count Instructions
8570 
// Count leading zeros of a 32-bit value.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit value.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of a 32-bit value: AArch64 has no ctz, so
// reverse the bits (rbitw) and count leading zeros instead.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros of a 64-bit value via rbit + clz.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8622 
8623 //---------- Population Count Instructions -------------------------------------
8624 //
8625 
// Population count of a 32-bit value: move to an FP/SIMD register,
// count bits per byte (cnt), sum the bytes (addv), move back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src (movw to itself zero-extends, so
    // the 32-bit value is unchanged) without declaring an effect on
    // src — relies on the upper 32 bits not being observable as an int.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Population count of a 32-bit value loaded from memory: load the
// word straight into the FP/SIMD register, avoiding the GPR move.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8669 
8670 // Note: Long.bitCount(long) returns an int.
8671 instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
8672   predicate(UsePopCountInstruction);
8673   match(Set dst (PopCountL src));
8674   effect(TEMP tmp);
8675   ins_cost(INSN_COST * 13);
8676 
8677   format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
8678             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8679             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8680             "mov    $dst, $tmp\t# vector (1D)" %}
8681   ins_encode %{
8682     __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
8683     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8684     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8685     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8686   %}
8687 
8688   ins_pipe(pipe_class_default);
8689 %}
8690 
8691 instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
8692   predicate(UsePopCountInstruction);
8693   match(Set dst (PopCountL (LoadL mem)));
8694   effect(TEMP tmp);
8695   ins_cost(INSN_COST * 13);
8696 
8697   format %{ "ldrd   $tmp, $mem\n\t"
8698             "cnt    $tmp, $tmp\t# vector (8B)\n\t"
8699             "addv   $tmp, $tmp\t# vector (8B)\n\t"
8700             "mov    $dst, $tmp\t# vector (1D)" %}
8701   ins_encode %{
8702     FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
8703     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
8704                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
8705     __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8706     __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
8707     __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
8708   %}
8709 
8710   ins_pipe(pipe_class_default);
8711 %}
8712 
8713 // ============================================================================
8714 // MemBar Instruction
8715 
// LoadFence: orders earlier loads before later loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire elided: the predicate proves the preceding access
// already provides the required acquire semantics, so only a block
// comment is emitted (no code).
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// MemBarAcquire: LoadLoad|LoadStore barrier after an acquiring load.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock: always elided on AArch64 -- only a block comment is
// emitted. Presumably the lock fast path's atomic sequence supplies the
// ordering; confirm against the lock expansion code.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders earlier loads and stores before later stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease elided: the predicate proves the following access
// already provides release semantics; comment only, no code.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease: LoadStore|StoreStore barrier before a releasing store.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarStoreStore: store-store ordering only.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock: always elided, mirroring membar_acquire_lock above.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile elided: predicate proves the surrounding accesses make
// the full fence redundant; comment only, no code.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile: full StoreLoad fence -- the most expensive barrier,
// hence the inflated cost to discourage its selection.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8860 
8861 // ============================================================================
8862 // Cast/Convert Instructions
8863 
// Reinterpret a long as a pointer: plain register move, elided entirely
// when the allocator assigns dst and src the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long: same conditional move as castX2P.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    // 32-bit move truncates the pointer to its low 32 bits.
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8906 
8907 // Convert compressed oop into int for vectors alignment masking
8908 // in case of 32bit oops (heap < 4Gb).
// Matches only when the compressed-oop shift is zero, so the narrow oop
// bits are the int value directly; a 32-bit move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Format fixed to use operand syntax ($dst, previously the literal text
  // "dst") and to show the movw actually emitted below. This only affects
  // -XX:+PrintOptoAssembly output, not code generation.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8922 
8923 
8924 // Convert oop pointer into compressed form
// Compress a possibly-null oop; the null check inside encode_heap_oop
// needs the flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null (no null check needed).
// NOTE(review): cr is declared but there is no effect(KILL cr) here --
// confirm encode_heap_oop_not_null leaves the flags untouched.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a possibly-null narrow oop.
// NOTE(review): cr is declared but not killed -- see note above.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8977 
8978 // n.b. AArch64 implementations of encode_klass_not_null and
8979 // decode_klass_not_null do not modify the flags register so, unlike
8980 // Intel, we don't kill CR as a side effect here
8981 
// Compress a klass pointer (never null). Per the note above, the AArch64
// implementation does not clobber the flags, so no KILL cr is declared.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (never null).
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      // Same register assigned to both operands: use the in-place,
      // single-register form of the macro.
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9015 
// Type-system-only nodes: CheckCastPP/CastPP/CastII exist for the
// optimizer's benefit and generate no machine code (size(0), empty
// encoding); dst is reused in place.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9046 
9047 // ============================================================================
9048 // Atomic operation instructions
9049 //
9050 // Intel and SPARC both implement Ideal Node LoadPLocked and
9051 // Store{PIL}Conditional instructions using a normal load for the
9052 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9053 //
9054 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9055 // pair to lock object allocations from Eden space when not using
9056 // TLABs.
9057 //
9058 // There does not appear to be a Load{IL}Locked Ideal Node and the
9059 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9060 // and to use StoreIConditional only for 32-bit and StoreLConditional
9061 // only for 64-bit.
9062 //
9063 // We implement LoadPLocked and StorePLocked instructions using,
9064 // respectively the AArch64 hw load-exclusive and store-conditional
9065 // instructions. Whereas we must implement each of
9066 // Store{IL}Conditional using a CAS which employs a pair of
9067 // instructions comprising a load-exclusive followed by a
9068 // store-conditional.
9069 
9070 
9071 // Locked-load (linked load) of the current heap-top
9072 // used when updating the eden heap top
9073 // implemented using ldaxr on AArch64
9074 
// Linked-load of a pointer using load-exclusive-acquire (ldaxr); pairs
// with storePConditional below for the eden heap-top update.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9087 
9088 // Conditional-store of the updated heap-top.
9089 // Used during allocation of the shared heap.
9090 // Sets flag (EQ) on success.
9091 // implemented using stlxr on AArch64.
9092 
// Store-conditional (stlxr) paired with loadPLocked above; the exclusive
// monitor set by the earlier ldaxr performs the comparison, so oldval is
// not re-checked here. Flags end up EQ on a successful store (per the
// format's cmpw of the stlxr status in rscratch1).
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9112 
9113 
9114 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9115 // when attempting to rebias a lock towards the current thread.  We
9116 // must use the acquire form of cmpxchg in order to guarantee acquire
9117 // semantics in this case.
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
// Result is delivered in the flags: EQ on successful exchange.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9152 
9153 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9154 // can't match them
9155 
9156 // standard CompareAndSwapX when we are using barriers
9157 // these have higher priority than the rules selected by a predicate
9158 
// CompareAndSwapI: cmpxchgw (ld/st-exclusive loop) followed by cset so
// res is 1 on success, 0 on failure. Flags are clobbered (KILL cr).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapL: 64-bit variant of the rule above.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapP: pointer-width (64-bit) CAS.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapN: narrow-oop (32-bit) CAS.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9230 
9231 // alternative CompareAndSwapX when we are eliding barriers
9232 
// Acquiring variants of CompareAndSwapX: selected (at lower cost, so in
// preference) when needs_acquiring_load_exclusive(n) shows the CAS must
// itself provide acquire semantics, letting surrounding barriers be elided.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring 64-bit CAS.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring pointer CAS.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring narrow-oop CAS.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9308 
9309 
// GetAndSetX (atomic exchange): delegates to MacroAssembler::atomic_xchg*
// which returns the previous memory value in prev.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit atomic exchange.
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Narrow-oop (32-bit) atomic exchange.
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Pointer-width atomic exchange.
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9345 
9346 
// GetAndAddX family: atomic fetch-and-add via MacroAssembler::atomic_add*.
// Register and immediate (AddSub-encodable) increments each come in two
// flavours: one returning the old value, and a slightly cheaper _no_res
// variant (passing noreg) selected when the result is unused.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is discarded.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit fetch-and-add with an immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate increment, result discarded.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit, register increment, result discarded.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add with an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit, immediate increment, result discarded.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9430 
9431 // Manifest a CmpL result in an integer register.
9432 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Manifest a CmpL result in an integer register:
//   cmp sets flags; csetw dst,NE yields 0 (equal) or 1 (unequal);
//   cnegw dst,LT negates that to -1 when src1 < src2 -- giving -1/0/1.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Same -1/0/1 manifestation against an immediate; a negative constant is
// handled by adding its negation so the immediate stays encodable.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // NOTE(review): -con would overflow for con == INT_MIN; presumably
    // immLAddSub restricts the constant to the add/sub immediate range,
    // which excludes that value -- confirm against the operand definition.
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9478 
9479 // ============================================================================
9480 // Conditional Move Instructions
9481 
9482 // n.b. we have identical rules for both a signed compare op (cmpOp)
9483 // and an unsigned compare op (cmpOpU). it would be nice if we could
9484 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
9490 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9491 
// Conditional move, int, signed compare: csel selects $src2 when the
// condition holds, otherwise $src1 (note the swapped operand order in
// the emitted cselw relative to the CMove inputs).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Same as above for an unsigned compare (see the n.b. comment preceding
// these rules for why signed/unsigned need separate flavours).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9523 
9524 // special cases where one arg is zero
9525 
9526 // n.b. this is selected in preference to the rule above because it
9527 // avoids loading constant 0 into a source register
9528 
9529 // TODO
9530 // we ought only to be able to cull one of these variants as the ideal
9531 // transforms ought always to order the zero consistently (to left/right?)
9532 
// Conditional move where one arm is the constant zero: use the zero
// register zr as a csel source so no constant load is needed.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the zero-left rule.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right: csel picks zr when the condition holds.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-compare flavour of the zero-right rule.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9596 
9597 // special case for creating a boolean 0 or 1
9598 
9599 // n.b. this is selected in preference to the rule above because it
9600 // avoids loading constants 0 and 1 into a source register
9601 
// Boolean materialization, signed flags: dst = cmp ? 1 : 0.
// CSINCW zr, zr yields 0 when cmp holds and 0+1 otherwise, so the
// condition is effectively inverted relative to a plain csel.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9620 
// Boolean materialization, unsigned flags: dst = cmp ? 1 : 0
// (see cmovI_reg_zero_one for the csincw trick).
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9639 
// Conditional move, long, signed flags: dst = cmp ? src2 : src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9655 
// Conditional move, long, unsigned flags: dst = cmp ? src2 : src1.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9671 
9672 // special cases where one arg is zero
9673 
// Conditional move, long, signed flags, reg/zero: dst = cmp ? 0 : src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9689 
// Conditional move, long, unsigned flags, reg/zero: dst = cmp ? 0 : src.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9705 
// Conditional move, long, signed flags, zero/reg: dst = cmp ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9721 
// Conditional move, long, unsigned flags, zero/reg: dst = cmp ? src : 0.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9737 
// Conditional move, pointer, signed flags: dst = cmp ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9753 
// Conditional move, pointer, unsigned flags: dst = cmp ? src2 : src1.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9769 
9770 // special cases where one arg is zero
9771 
// Conditional move, pointer, signed flags, reg/zero: dst = cmp ? null : src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9787 
// Conditional move, pointer, unsigned flags, reg/zero: dst = cmp ? null : src.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9803 
// Conditional move, pointer, signed flags, zero/reg: dst = cmp ? src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9819 
// Conditional move, pointer, unsigned flags, zero/reg: dst = cmp ? src : null.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9835 
// Conditional move, compressed pointer (32-bit), signed flags:
// dst = cmp ? src2 : src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9851 
// Conditional move, compressed pointer (32-bit), unsigned flags:
// dst = cmp ? src2 : src1.
// Fix: the format string previously said "signed" although this is the
// cmpOpU/rFlagsRegU (unsigned) rule, matching all sibling cmovU* rules.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9867 
9868 // special cases where one arg is zero
9869 
// Conditional move, compressed pointer, signed flags, reg/zero:
// dst = cmp ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9885 
// Conditional move, compressed pointer, unsigned flags, reg/zero:
// dst = cmp ? 0 : src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9901 
// Conditional move, compressed pointer, signed flags, zero/reg:
// dst = cmp ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9917 
// Conditional move, compressed pointer, unsigned flags, zero/reg:
// dst = cmp ? src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9933 
// Conditional move, float, signed flags: dst = cmp ? src2 : src1
// (FCSELS selects its first source when the condition holds).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9951 
// Conditional move, float, unsigned flags: dst = cmp ? src2 : src1.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
9969 
// Conditional move, double, signed flags: dst = cmp ? src2 : src1.
// Fix: the format string previously said "cmove float" although this rule
// matches CMoveD and emits the double-precision FCSELD.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
9987 
// Conditional move, double, unsigned flags: dst = cmp ? src2 : src1.
// Fix: the format string previously said "cmove float" although this rule
// matches CMoveD and emits the double-precision FCSELD.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10005 
10006 // ============================================================================
10007 // Arithmetic Instructions
10008 //
10009 
10010 // Integer Addition
10011 
10012 // TODO
10013 // these currently employ operations which do not set CR and hence are
10014 // not flagged as killing CR but we would like to isolate the cases
10015 // where we want to set flags from those where we don't. need to work
10016 // out how to do that.
10017 
// Integer addition, register + register: dst = src1 + src2 (32-bit ADDW).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10032 
// Integer addition, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10046 
// Integer addition with the first operand narrowed from a long
// (ConvL2I); the 32-bit ADDW ignores the upper bits, so no explicit
// truncation instruction is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10060 
10061 // Pointer Addition
// Pointer addition, base pointer + long offset in registers.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10076 
// Pointer addition with an int offset: folds the ConvI2L into the add
// via the SXTW extended-register form (one instruction instead of two).
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10091 
// Pointer addition with a left-shifted long index: folds the shift into
// the address computation (LEA with scaled register offset).
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10106 
// Pointer addition with a sign-extended, scaled int index: folds both the
// ConvI2L and the shift into one LEA with an SXTW-scaled register offset.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10121 
// Left shift of a sign-extended int: (long)(int)src << scale collapses to
// a single SBFIZ (sign-extend + insert at the shift position).
// The field width is capped at 32 because only the low 32 bits of an int
// source are significant.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10136 
10137 // Pointer Immediate Addition
10138 // n.b. this needs to be more expensive than using an indirect memory
10139 // operand
// Pointer addition, register + add/sub-encodable immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10153 
10154 // Long Addition
// Long addition, register + register (64-bit ADD).
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10170 
// Long Immediate Addition. No constant pool entries required.
// Long addition, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10185 
10186 // Integer Subtraction
// Integer subtraction, register - register (32-bit SUBW).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10201 
10202 // Immediate Subtraction
// Integer subtraction, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10216 
10217 // Long Subtraction
// Long subtraction, register - register (64-bit SUB).
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10233 
// Long Immediate Subtraction. No constant pool entries required.
// Long subtraction, register - add/sub-encodable immediate.
// Fix: format string was missing the space after the mnemonic
// ("sub$dst" -> "sub $dst"), matching all other add/sub formats.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10248 
10249 // Integer Negation (special case for sub)
10250 
// Integer negation (0 - src) matched as a special case of SubI.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10264 
10265 // Long Negation
10266 
// Long negation (0 - src) matched as a special case of SubL.
// NOTE(review): src is declared iRegIorL2I rather than iRegL, unlike the
// other long-arithmetic rules here — looks like a copy-paste slip; confirm
// against upstream before relying on this rule's operand class.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10280 
10281 // Integer Multiply
10282 
// Integer multiplication (32-bit MULW).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10297 
// Widening multiply: (long)int * (long)int done in one SMULL instead of
// two sign-extends plus a 64-bit multiply.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10312 
10313 // Long Multiply
10314 
// Long multiplication (64-bit MUL).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10329 
// High half of a 64x64->128-bit signed multiply (SMULH), used by C2's
// MulHiL node (e.g. for Math.multiplyHigh / magic-number division).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10345 
10346 // Combined Integer Multiply & Add/Sub
10347 
// Fused multiply-add, int: dst = src3 + src1 * src2 in one MADDW.
// Fix: format string previously said "madd" although the encoder emits
// the 32-bit maddw; now matches the actual instruction.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10363 
// Fused multiply-subtract, int: dst = src3 - src1 * src2 in one MSUBW.
// Fix: format string previously said "msub" although the encoder emits
// the 32-bit msubw; now matches the actual instruction.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10379 
10380 // Combined Long Multiply & Add/Sub
10381 
// Fused multiply-add, long: dst = src3 + src1 * src2 in one MADD.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10397 
// Fused multiply-subtract, long: dst = src3 - src1 * src2 in one MSUB.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10413 
10414 // Integer Divide
10415 
// Integer division (SDIVW); the encoding class handles any required
// corner-case behavior.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10425 
// Sign-bit extraction: (src >> 31) >>> 31 reduces to a single unsigned
// shift by 31 (both forms yield the value of bit 31).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10435 
// Rounding adjustment used before dividing by a power of two:
// src + ((src >> 31) >>> 31) folds into a single add with a shifted
// operand (addw dst, src, src, LSR #31).
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10449 
10450 // Long Divide
10451 
// Long division (SDIV); the encoding class handles any required
// corner-case behavior.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10461 
// Sign-bit extraction, long: (src >> 63) >>> 63 reduces to a single
// unsigned shift by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10471 
// Rounding adjustment, long: src + ((src >> 63) >>> 63) folds into one
// add with a shifted operand (add dst, src, src, LSR #63).
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10485 
10486 // Integer Remainder
10487 
// Integer remainder: divide, then multiply-subtract back
// (dst = src1 - (src1 / src2) * src2).
// Fix: second format line had a stray, unbalanced "(" after msubw.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10498 
10499 // Long Remainder
10500 
// Long remainder: divide, then multiply-subtract back
// (dst = src1 - (src1 / src2) * src2).
// Fix: second format line had a stray, unbalanced "(" after msub.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10511 
10512 // Integer Shifts
10513 
10514 // Shift Left Register
// Shift left, int, variable shift amount in a register (LSLVW).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10529 
10530 // Shift Left Immediate
// Shift left, int, constant shift amount; masked to 0..31 as Java requires.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10545 
10546 // Shift Right Logical Register
// Unsigned shift right, int, variable shift amount in a register (LSRVW).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10561 
10562 // Shift Right Logical Immediate
// Unsigned shift right, int, constant shift amount; masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10577 
10578 // Shift Right Arithmetic Register
// Arithmetic shift right, int, variable shift amount in a register (ASRVW).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10593 
10594 // Shift Right Arithmetic Immediate
// Arithmetic shift right, int, constant shift amount; masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10609 
10610 // Combined Int Mask and Right Shift (using UBFM)
10611 // TODO
10612 
10613 // Long Shifts
10614 
10615 // Shift Left Register
// Shift left, long, variable shift amount in a register (LSLV).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10630 
10631 // Shift Left Immediate
// Left shift of a long by a constant: dst = src1 << (imm & 0x3f).
// The 6-bit mask matches Java's 64-bit shift semantics (count mod 64).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10646 
10647 // Shift Right Logical Register
// Logical (unsigned) right shift of a long by a register:
// dst = src1 >>> src2, using the variable-shift form lsrv (64-bit).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10662 
10663 // Shift Right Logical Immediate
// Logical (unsigned) right shift of a long by a constant:
// dst = src1 >>> (imm & 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10678 
10679 // A special-case pattern for card table stores.
// dst = (long)src1 >>> (imm & 0x3f): CastP2X reinterprets the pointer's
// bits as a long, which is then logically shifted right (the pattern the
// card-table write barrier produces when computing a card index).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10694 
10695 // Shift Right Arithmetic Register
// Arithmetic right shift of a long by a register: dst = src1 >> src2,
// using the variable-shift form asrv (64-bit).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10710 
10711 // Shift Right Arithmetic Immediate
// Arithmetic right shift of a long by a constant:
// dst = src1 >> (imm & 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10726 
10727 // BEGIN This section of the file is automatically generated. Do not edit --------------
10728 
// NOTE(review): auto-generated section (see BEGIN marker above) — comments
// here should ideally go into the generator source, not this file.
// dst = ~src1 (long): xor with -1 is bitwise NOT, emitted as
// eon dst, src1, zr (zr reads as zero, and eon inverts the second operand).
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// dst = ~src1 (int): same idea as regL_not_reg using the 32-bit eonw.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10761 
// ---- Logical op with an inverted register operand (auto-generated) ----
// Each pattern folds (src2 ^ -1), i.e. ~src2, into the AArch64 inverted
// logical forms: bic/bicw (AND NOT), orn/ornw (OR NOT), eon/eonw (XOR NOT).

// dst = src1 & ~src2 (int).
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2 (long).
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (int).
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (long).
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (int).  The match rule is written as
// -1 ^ (src2 ^ src1) — presumably the shape Ideal canonicalization
// produces — which is algebraically the same value.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (long); see XorI_reg_not_reg for the pattern shape.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10863 
// ---- AND with a shifted-then-inverted register operand (auto-generated) ----
// Each pattern folds ~(shift(src2, src3)) into the shifted-register BIC form:
// dst = src1 & ~(src2 shift src3).  Shift counts are masked to the operand
// width (0x1f for 32-bit, 0x3f for 64-bit).

// dst = src1 & ~(src2 >>> src3) (int).
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3) (long).
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) (int).
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) (long).
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3) (int).
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3) (long).
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10971 
// ---- XOR-NOT with a shifted register operand (auto-generated) ----
// Each pattern computes dst = ~(src1 ^ (src2 shift src3)) via eon/eonw.
// The match rule is written as -1 ^ (shift(src2, src3) ^ src1) —
// presumably the canonicalized Ideal shape — which is the same value.

// dst = ~(src1 ^ (src2 >>> src3)) (int).
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >>> src3)) (long).
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3)) (int).
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3)) (long).
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3)) (int).
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3)) (long).
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11079 
// ---- OR with a shifted-then-inverted register operand (auto-generated) ----
// Each pattern folds ~(shift(src2, src3)) into the shifted-register ORN form:
// dst = src1 | ~(src2 shift src3).

// dst = src1 | ~(src2 >>> src3) (int).
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3) (long).
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (int).
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (long).
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (int).
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (long).
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11187 
// ---- AND with a shifted register operand (auto-generated) ----
// dst = src1 & (src2 shift src3) using the shifted-register form of
// AND ("andr" is the assembler's name; "and" is a C++ keyword).

// dst = src1 & (src2 >>> src3) (int).
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3) (long).
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (int).
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (long).
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (int).
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (long).
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11301 
// ---- XOR with a shifted register operand (auto-generated) ----
// dst = src1 ^ (src2 shift src3) using the shifted-register EOR form.

// dst = src1 ^ (src2 >>> src3) (int).
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3) (long).
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (int).
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (long).
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (int).
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (long).
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11415 
// ---- OR with a shifted register operand (auto-generated) ----
// dst = src1 | (src2 shift src3) using the shifted-register ORR form.

// dst = src1 | (src2 >>> src3) (int).
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3) (long).
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (int).
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (long).
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (int).
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (long).
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11529 
// ---- ADD with a shifted register operand (auto-generated) ----
// dst = src1 + (src2 shift src3) using the shifted-register ADD form.

// dst = src1 + (src2 >>> src3) (int).
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3) (long).
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (int).
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (long).
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11605 
11606 instruct AddI_reg_LShift_reg(iRegINoSp dst,
11607                          iRegIorL2I src1, iRegIorL2I src2,
11608                          immI src3, rFlagsReg cr) %{
11609   match(Set dst (AddI src1 (LShiftI src2 src3)));
11610 
11611   ins_cost(1.9 * INSN_COST);
11612   format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
11613 
11614   ins_encode %{
11615     __ addw(as_Register($dst$$reg),
11616               as_Register($src1$$reg),
11617               as_Register($src2$$reg),
11618               Assembler::LSL,
11619               $src3$$constant & 0x1f);
11620   %}
11621 
11622   ins_pipe(ialu_reg_reg_shift);
11623 %}
11624 
11625 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
11626                          iRegL src1, iRegL src2,
11627                          immI src3, rFlagsReg cr) %{
11628   match(Set dst (AddL src1 (LShiftL src2 src3)));
11629 
11630   ins_cost(1.9 * INSN_COST);
11631   format %{ "add  $dst, $src1, $src2, LSL $src3" %}
11632 
11633   ins_encode %{
11634     __ add(as_Register($dst$$reg),
11635               as_Register($src1$$reg),
11636               as_Register($src2$$reg),
11637               Assembler::LSL,
11638               $src3$$constant & 0x3f);
11639   %}
11640 
11641   ins_pipe(ialu_reg_reg_shift);
11642 %}
11643 
11644 instruct SubI_reg_URShift_reg(iRegINoSp dst,
11645                          iRegIorL2I src1, iRegIorL2I src2,
11646                          immI src3, rFlagsReg cr) %{
11647   match(Set dst (SubI src1 (URShiftI src2 src3)));
11648 
11649   ins_cost(1.9 * INSN_COST);
11650   format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
11651 
11652   ins_encode %{
11653     __ subw(as_Register($dst$$reg),
11654               as_Register($src1$$reg),
11655               as_Register($src2$$reg),
11656               Assembler::LSR,
11657               $src3$$constant & 0x1f);
11658   %}
11659 
11660   ins_pipe(ialu_reg_reg_shift);
11661 %}
11662 
11663 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
11664                          iRegL src1, iRegL src2,
11665                          immI src3, rFlagsReg cr) %{
11666   match(Set dst (SubL src1 (URShiftL src2 src3)));
11667 
11668   ins_cost(1.9 * INSN_COST);
11669   format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
11670 
11671   ins_encode %{
11672     __ sub(as_Register($dst$$reg),
11673               as_Register($src1$$reg),
11674               as_Register($src2$$reg),
11675               Assembler::LSR,
11676               $src3$$constant & 0x3f);
11677   %}
11678 
11679   ins_pipe(ialu_reg_reg_shift);
11680 %}
11681 
11682 instruct SubI_reg_RShift_reg(iRegINoSp dst,
11683                          iRegIorL2I src1, iRegIorL2I src2,
11684                          immI src3, rFlagsReg cr) %{
11685   match(Set dst (SubI src1 (RShiftI src2 src3)));
11686 
11687   ins_cost(1.9 * INSN_COST);
11688   format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
11689 
11690   ins_encode %{
11691     __ subw(as_Register($dst$$reg),
11692               as_Register($src1$$reg),
11693               as_Register($src2$$reg),
11694               Assembler::ASR,
11695               $src3$$constant & 0x1f);
11696   %}
11697 
11698   ins_pipe(ialu_reg_reg_shift);
11699 %}
11700 
11701 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
11702                          iRegL src1, iRegL src2,
11703                          immI src3, rFlagsReg cr) %{
11704   match(Set dst (SubL src1 (RShiftL src2 src3)));
11705 
11706   ins_cost(1.9 * INSN_COST);
11707   format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
11708 
11709   ins_encode %{
11710     __ sub(as_Register($dst$$reg),
11711               as_Register($src1$$reg),
11712               as_Register($src2$$reg),
11713               Assembler::ASR,
11714               $src3$$constant & 0x3f);
11715   %}
11716 
11717   ins_pipe(ialu_reg_reg_shift);
11718 %}
11719 
11720 instruct SubI_reg_LShift_reg(iRegINoSp dst,
11721                          iRegIorL2I src1, iRegIorL2I src2,
11722                          immI src3, rFlagsReg cr) %{
11723   match(Set dst (SubI src1 (LShiftI src2 src3)));
11724 
11725   ins_cost(1.9 * INSN_COST);
11726   format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
11727 
11728   ins_encode %{
11729     __ subw(as_Register($dst$$reg),
11730               as_Register($src1$$reg),
11731               as_Register($src2$$reg),
11732               Assembler::LSL,
11733               $src3$$constant & 0x1f);
11734   %}
11735 
11736   ins_pipe(ialu_reg_reg_shift);
11737 %}
11738 
11739 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
11740                          iRegL src1, iRegL src2,
11741                          immI src3, rFlagsReg cr) %{
11742   match(Set dst (SubL src1 (LShiftL src2 src3)));
11743 
11744   ins_cost(1.9 * INSN_COST);
11745   format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
11746 
11747   ins_encode %{
11748     __ sub(as_Register($dst$$reg),
11749               as_Register($src1$$reg),
11750               as_Register($src2$$reg),
11751               Assembler::LSL,
11752               $src3$$constant & 0x3f);
11753   %}
11754 
11755   ins_pipe(ialu_reg_reg_shift);
11756 %}
11757 
11758 
11759 
// ============================================================
// Shift Left followed by Shift Right — bitfield-move patterns.
// (x << L) >> R collapses to a single sbfm/ubfm with
//   immr = (R - L) & (width - 1),  imms = (width - 1) - L
// The predicates reject shift counts >= the operand width, which the
// bitfield-move encoding cannot express.  In the predicates,
// n->in(2) is the right-shift count and n->in(1)->in(2) the
// left-shift count.
// ============================================================

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit (signed) variant.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned (URShift) 64-bit variant: zero-fills instead of
// sign-extending.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned (URShift) 32-bit variant.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11851 // Bitfield extract with shift & mask
11852 
// Bitfield extract with shift & mask, int:
//   dst = (src >>> rshift) & mask
// where mask is (2**width - 1), guaranteed by immI_bitmask.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  // Previously "ubfxw $dst, $src, $mask": the shift amount was missing
  // from the debug listing.  Real operand order is dst, src, lsb, width.
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    // Mask the count to Java int-shift semantics (mod 32) so an
    // out-of-range ideal-graph constant cannot trip the assembler's
    // lsb-range assertion.
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask, long:
//   dst = (src >>> rshift) & mask
// where mask is (2**width - 1), guaranteed by immL_bitmask.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  // Previously "ubfx $dst, $src, $mask": the shift amount was missing
  // from the debug listing.  Real operand order is dst, src, lsb, width.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    // Mask the count to Java long-shift semantics (mod 64) so an
    // out-of-range ideal-graph constant cannot trip the assembler's
    // lsb-range assertion.
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11883 
11884 // We can use ubfx when extending an And with a mask when we know mask
11885 // is positive.  We know that because immI_bitmask guarantees it.
// Int bitfield extract widened to long:
//   dst = (long)((src >>> rshift) & mask)
// The 64-bit ubfx zero-fills the high word, so it implements both the
// extract and the (non-negative, per immI_bitmask) ConvI2L at once.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  // Previously "ubfx $dst, $src, $mask": the shift amount was missing
  // from the debug listing.
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    // Mask the count to Java int-shift semantics (mod 32) so an
    // out-of-range ideal-graph constant cannot trip the assembler's
    // lsb-range assertion.
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11901 
// Rotations
//
// (src1 << l) | (src2 >>> r) with l + r == width maps to extr
// (extract register), which concatenates src1:src2 and takes `width`
// bits starting at bit r.  The predicate checks (l + r) % width == 0;
// when src1 == src2 this is a rotate right by r.  Add works too
// because the two shifted fields cannot overlap, so + and | agree.

instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant (extrw), counts taken mod 32.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same combination expressed with AddL instead of OrL.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same combination expressed with AddI instead of OrI.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11963 
11964 
// rol expander
//
// Match-less helper (effect only, no match rule), used via expand by
// the rol*_rReg_Var_* rules below.  AArch64 has no rol instruction;
// rol(x, s) is computed as ror(x, -s): subw negates the count into
// rscratch1 and rorv rotates right by it (hardware masks the variable
// count mod 64).

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant: rorvw masks the negated count mod 32.

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11996 
// Variable rotate-left, long.  rol(x, s) appears in the ideal graph
// as (x << s) | (x >>> (64 - s)); C2 may canonicalize 64 - s to
// (0 - s), hence the separate c_64 and c0 variants.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same pattern with the subtrahend canonicalized to 0.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12014 
// Variable rotate-left, int.  rol(x, s) == (x << s) | (x >>> (32 - s)).
// FIX: this rule previously declared iRegLNoSp/iRegL operands and
// expanded to the 64-bit rolL_rReg even though it matches the 32-bit
// OrI tree, so it could never be matched correctly (cf. JDK-8157906).
// Use int operands and the 32-bit rolI_rReg expander.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12023 
// Variable rotate-left, int, with the subtrahend canonicalized to 0.
// FIX: previously used long operands and the 64-bit rolL_rReg
// expander for this 32-bit OrI tree (cf. JDK-8157906); use int
// operands and rolI_rReg.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12032 
// ror expander
//
// Match-less helper (effect only, no match rule), used via expand by
// the ror*_rReg_Var_* rules below.  Rotate right maps directly to
// rorv; the hardware masks the variable count mod 64.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant: rorvw masks the count mod 32.

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12062 
// Variable rotate-right, long.  ror(x, s) appears in the ideal graph
// as (x >>> s) | (x << (64 - s)); C2 may canonicalize 64 - s to
// (0 - s), hence the separate c_64 and c0 variants.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same pattern with the subtrahend canonicalized to 0.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12080 
// Variable rotate-right, int.  ror(x, s) == (x >>> s) | (x << (32 - s)).
// FIX: this rule previously declared iRegLNoSp/iRegL operands and
// expanded to the 64-bit rorL_rReg even though it matches the 32-bit
// OrI tree, so it could never be matched correctly (cf. JDK-8157906).
// Use int operands and the 32-bit rorI_rReg expander.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12089 
// Variable rotate-right, int, with the subtrahend canonicalized to 0.
// FIX: previously used long operands and the 64-bit rorL_rReg
// expander for this 32-bit OrI tree (cf. JDK-8157906); use int
// operands and rorI_rReg.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12098 
// Add/subtract (extended)
//
// A ConvI2L second operand folds into add/sub's extended-register
// form: the instruction sign-extends src2's low 32 bits (sxtw)
// itself, so no separate widening instruction is needed.

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12126 
12127 
// (src2 << n) >> n is a sign- (RShift) or zero- (URShift) extension
// of src2's low (width - n) bits; it folds into add's
// extended-register form (sxtb/sxth/sxtw/uxtb), saving the explicit
// shift pair.  The fixed-value immI_* operands pin n to the count
// that corresponds to each extension width.

// int: (src2 << 16) >> 16  ==  sign-extend low 16 bits
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: (src2 << 24) >> 24  ==  sign-extend low 8 bits
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: (src2 << 24) >>> 24  ==  zero-extend low 8 bits
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: (src2 << 48) >> 48  ==  sign-extend low 16 bits
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: (src2 << 32) >> 32  ==  sign-extend low 32 bits
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: (src2 << 56) >> 56  ==  sign-extend low 8 bits
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: (src2 << 56) >>> 56  ==  zero-extend low 8 bits
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12218 
12219 
// (src2 & mask) with an all-ones low mask (0xff / 0xffff /
// 0xffffffff) is a zero-extension of src2; it folds into add/sub's
// extended-register form (uxtb/uxth/uxtw), saving the explicit and.

// int: src1 + (src2 & 0xff)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src1 + (src2 & 0xffff)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (src2 & 0xff)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (src2 & 0xffff)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (src2 & 0xffffffff)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src1 - (src2 & 0xff)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src1 - (src2 & 0xffff)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 - (src2 & 0xff)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 - (src2 & 0xffff)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 - (src2 & 0xffffffff)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12349 
12350 // END This section of the file is automatically generated. Do not edit --------------
12351 
// ============================================================================
// Floating Point Arithmetic Instructions
//
// Straightforward two-operand scalar FP ops: each rule maps one ideal
// node to a single AArch64 instruction (fadds/faddd/fsubs/fsubd/
// fmuls/fmuld), s-suffix for float, d-suffix for double.

instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12444 
// We cannot use these fused multiply-add/subtract ops because they do
// not produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12450 
12451 
12452 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12453 //   match(Set dst (AddF (MulF src1 src2) src3));
12454 
12455 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12456 
12457 //   ins_encode %{
12458 //     __ fmadds(as_FloatRegister($dst$$reg),
12459 //              as_FloatRegister($src1$$reg),
12460 //              as_FloatRegister($src2$$reg),
12461 //              as_FloatRegister($src3$$reg));
12462 //   %}
12463 
12464 //   ins_pipe(pipe_class_default);
12465 // %}
12466 
12467 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12468 //   match(Set dst (AddD (MulD src1 src2) src3));
12469 
12470 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12471 
12472 //   ins_encode %{
12473 //     __ fmaddd(as_FloatRegister($dst$$reg),
12474 //              as_FloatRegister($src1$$reg),
12475 //              as_FloatRegister($src2$$reg),
12476 //              as_FloatRegister($src3$$reg));
12477 //   %}
12478 
12479 //   ins_pipe(pipe_class_default);
12480 // %}
12481 
12482 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12483 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12484 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12485 
12486 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12487 
12488 //   ins_encode %{
12489 //     __ fmsubs(as_FloatRegister($dst$$reg),
12490 //               as_FloatRegister($src1$$reg),
12491 //               as_FloatRegister($src2$$reg),
12492 //              as_FloatRegister($src3$$reg));
12493 //   %}
12494 
12495 //   ins_pipe(pipe_class_default);
12496 // %}
12497 
12498 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12499 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12500 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12501 
12502 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12503 
12504 //   ins_encode %{
12505 //     __ fmsubd(as_FloatRegister($dst$$reg),
12506 //               as_FloatRegister($src1$$reg),
12507 //               as_FloatRegister($src2$$reg),
12508 //               as_FloatRegister($src3$$reg));
12509 //   %}
12510 
12511 //   ins_pipe(pipe_class_default);
12512 // %}
12513 
12514 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12515 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12516 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12517 
12518 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12519 
12520 //   ins_encode %{
12521 //     __ fnmadds(as_FloatRegister($dst$$reg),
12522 //                as_FloatRegister($src1$$reg),
12523 //                as_FloatRegister($src2$$reg),
12524 //                as_FloatRegister($src3$$reg));
12525 //   %}
12526 
12527 //   ins_pipe(pipe_class_default);
12528 // %}
12529 
12530 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12531 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12532 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12533 
12534 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12535 
12536 //   ins_encode %{
12537 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12538 //                as_FloatRegister($src1$$reg),
12539 //                as_FloatRegister($src2$$reg),
12540 //                as_FloatRegister($src3$$reg));
12541 //   %}
12542 
12543 //   ins_pipe(pipe_class_default);
12544 // %}
12545 
12546 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12547 //   match(Set dst (SubF (MulF src1 src2) src3));
12548 
12549 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12550 
12551 //   ins_encode %{
12552 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12553 //                as_FloatRegister($src1$$reg),
12554 //                as_FloatRegister($src2$$reg),
12555 //                as_FloatRegister($src3$$reg));
12556 //   %}
12557 
12558 //   ins_pipe(pipe_class_default);
12559 // %}
12560 
12561 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12562 //   match(Set dst (SubD (MulD src1 src2) src3));
12563 
12564 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12565 
12566 //   ins_encode %{
12567 //   // n.b. insn name should be fnmsubd
12568 //     __ fnmsub(as_FloatRegister($dst$$reg),
12569 //                as_FloatRegister($src1$$reg),
12570 //                as_FloatRegister($src2$$reg),
12571 //                as_FloatRegister($src3$$reg));
12572 //   %}
12573 
12574 //   ins_pipe(pipe_class_default);
12575 // %}
12576 
12577 
// Single-precision divide: dst = src1 / src2 (fdivs).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: dst = src1 / src2 (fdivd).
// Higher cost than the single-precision form reflects longer divide latency.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12607 
// Single-precision negate: dst = -src (fnegs).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed format mnemonic: the encoding emits fnegs (was printed as
  // "fneg"); now consistent with negD_reg_reg's "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12621 
// Double-precision negate: dst = -src (fnegd).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision absolute value: dst = |src| (fabss).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value: dst = |src| (fabsd).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12661 
// Double-precision square root (fsqrtd).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed pipe class: was fp_div_s. A double-precision sqrt belongs on the
  // double divide/sqrt pipe (the classes were swapped with sqrtF_reg).
  ins_pipe(fp_div_d);
%}
12674 
// Single-precision square root. Matches the float sqrt idiom expressed as
// ConvD2F(SqrtD(ConvF2D src)); a single fsqrts replaces the widen/sqrt/narrow
// sequence.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed pipe class: was fp_div_d. A single-precision sqrt belongs on the
  // single divide/sqrt pipe (the classes were swapped with sqrtD_reg).
  ins_pipe(fp_div_s);
%}
12687 
12688 // ============================================================================
12689 // Logical Instructions
12690 
12691 // Integer Logical Instructions
12692 
12693 // And Instructions
12694 
12695 
12696 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
12697   match(Set dst (AndI src1 src2));
12698 
12699   format %{ "andw  $dst, $src1, $src2\t# int" %}
12700 
12701   ins_cost(INSN_COST);
12702   ins_encode %{
12703     __ andw(as_Register($dst$$reg),
12704             as_Register($src1$$reg),
12705             as_Register($src2$$reg));
12706   %}
12707 
12708   ins_pipe(ialu_reg_reg);
12709 %}
12710 
// Int AND (register-logical immediate): dst = src1 & src2 (andw).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed format mnemonic: the encoding emits the non-flag-setting andw,
  // not andsw, so print the instruction that is actually generated.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12725 
// Or Instructions

// Int OR (register-register): dst = src1 | src2 (orrw).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int OR (register-logical immediate): dst = src1 | src2 (orrw).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Int XOR (register-register): dst = src1 ^ src2 (eorw).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Int XOR (register-logical immediate): dst = src1 ^ src2 (eorw).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12789 
12790 // Long Logical Instructions
12791 // TODO
12792 
// Long AND (register-register): dst = src1 & src2 (andr).
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not "# int".
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12807 
// Long AND (register-logical immediate): dst = src1 & src2 (andr).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not "# int".
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12822 
12823 // Or Instructions
12824 
// Long OR (register-register): dst = src1 | src2 (orr).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not "# int".
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12839 
// Long OR (register-logical immediate): dst = src1 | src2 (orr).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not "# int".
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12854 
12855 // Xor Instructions
12856 
// Long XOR (register-register): dst = src1 ^ src2 (eor).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed format comment: this is the long (64-bit) form, not "# int".
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12871 
// Long XOR (register-logical immediate): dst = src1 ^ src2 (eor).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed format comment ("# long", was "# int") and normalized the
  // format/ins_cost order to match the sibling logical rules.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12886 
// Sign-extend int to long: sbfm dst, src, #0, #31 (i.e. sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: matches (ConvI2L src) masked with 0xFFFFFFFF
// and emits a single ubfm (uxtw) instead of sign-extend plus mask.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: 32-bit register move discards the high word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Int to boolean: dst = (src != 0) ? 1 : 0 via cmpw/cset.
// Clobbers the flags register, hence KILL cr.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer to boolean: dst = (src != null) ? 1 : 0 via 64-bit cmp/cset.
// Clobbers the flags register, hence KILL cr.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12961 
// Narrow double to float (fcvtd).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Widen float to double (fcvts).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float to int: fcvtzsw converts to a signed 32-bit value.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float to long: fcvtzs converts to a signed 64-bit value.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// Int to float: scvtfws, signed 32-bit convert.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Long to float: scvtfs, signed 64-bit convert.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double to int: fcvtzdw converts to a signed 32-bit value.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double to long: fcvtzd converts to a signed 64-bit value.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Int to double: scvtfwd, signed 32-bit convert.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Long to double: scvtfd, signed 64-bit convert.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13091 
// stack <-> reg and reg <-> reg shuffles with no conversion

// Load the raw 32-bit pattern of a float stack slot into a GP register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a 32-bit int stack slot into an FP register as a raw bit pattern.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load the raw 64-bit pattern of a double stack slot into a GP register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a 64-bit long stack slot into an FP register as a raw bit pattern.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an FP single register to an int stack slot (raw bits).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store a 32-bit GP register to a float stack slot (raw bits).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13201 
// Store an FP double register to a long stack slot (raw bits).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed format operand order: the encoding stores $src to stack slot
  // $dst (strd src, [sp, #disp]); was printed as "strd $dst, $src",
  // inconsistent with the other *_reg_stack rules.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13219 
// Store a 64-bit GP register to a double stack slot (raw bits).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Move raw float bits FP register -> GP register (fmov, no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Move raw int bits GP register -> FP register (fmov, no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Move raw double bits FP register -> GP register (fmov, no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Move raw long bits GP register -> FP register (fmov, no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13309 
13310 // ============================================================================
13311 // clearing of an array
13312 
13313 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13314 %{
13315   match(Set dummy (ClearArray cnt base));
13316   effect(USE_KILL cnt, USE_KILL base);
13317 
13318   ins_cost(4 * INSN_COST);
13319   format %{ "ClearArray $cnt, $base" %}
13320 
13321   ins_encode %{
13322     __ zero_words($base$$Register, $cnt$$Register);
13323   %}
13324 
13325   ins_pipe(pipe_class_memory);
13326 %}
13327 
13328 instruct clearArray_imm_reg(immL cnt, iRegP base, Universe dummy, rFlagsReg cr)
13329 %{
13330   match(Set dummy (ClearArray cnt base));
13331 
13332   ins_cost(4 * INSN_COST);
13333   format %{ "ClearArray $cnt, $base" %}
13334 
13335   ins_encode %{
13336     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
13337   %}
13338 
13339   ins_pipe(pipe_class_memory);
13340 %}
13341 
13342 // ============================================================================
13343 // Overflow Math Instructions
13344 
13345 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13346 %{
13347   match(Set cr (OverflowAddI op1 op2));
13348 
13349   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13350   ins_cost(INSN_COST);
13351   ins_encode %{
13352     __ cmnw($op1$$Register, $op2$$Register);
13353   %}
13354 
13355   ins_pipe(icmp_reg_reg);
13356 %}
13357 
13358 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13359 %{
13360   match(Set cr (OverflowAddI op1 op2));
13361 
13362   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13363   ins_cost(INSN_COST);
13364   ins_encode %{
13365     __ cmnw($op1$$Register, $op2$$constant);
13366   %}
13367 
13368   ins_pipe(icmp_reg_imm);
13369 %}
13370 
13371 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13372 %{
13373   match(Set cr (OverflowAddL op1 op2));
13374 
13375   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13376   ins_cost(INSN_COST);
13377   ins_encode %{
13378     __ cmn($op1$$Register, $op2$$Register);
13379   %}
13380 
13381   ins_pipe(icmp_reg_reg);
13382 %}
13383 
13384 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13385 %{
13386   match(Set cr (OverflowAddL op1 op2));
13387 
13388   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13389   ins_cost(INSN_COST);
13390   ins_encode %{
13391     __ cmn($op1$$Register, $op2$$constant);
13392   %}
13393 
13394   ins_pipe(icmp_reg_imm);
13395 %}
13396 
13397 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13398 %{
13399   match(Set cr (OverflowSubI op1 op2));
13400 
13401   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13402   ins_cost(INSN_COST);
13403   ins_encode %{
13404     __ cmpw($op1$$Register, $op2$$Register);
13405   %}
13406 
13407   ins_pipe(icmp_reg_reg);
13408 %}
13409 
13410 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13411 %{
13412   match(Set cr (OverflowSubI op1 op2));
13413 
13414   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13415   ins_cost(INSN_COST);
13416   ins_encode %{
13417     __ cmpw($op1$$Register, $op2$$constant);
13418   %}
13419 
13420   ins_pipe(icmp_reg_imm);
13421 %}
13422 
13423 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13424 %{
13425   match(Set cr (OverflowSubL op1 op2));
13426 
13427   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13428   ins_cost(INSN_COST);
13429   ins_encode %{
13430     __ cmp($op1$$Register, $op2$$Register);
13431   %}
13432 
13433   ins_pipe(icmp_reg_reg);
13434 %}
13435 
13436 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13437 %{
13438   match(Set cr (OverflowSubL op1 op2));
13439 
13440   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13441   ins_cost(INSN_COST);
13442   ins_encode %{
13443     __ cmp($op1$$Register, $op2$$constant);
13444   %}
13445 
13446   ins_pipe(icmp_reg_imm);
13447 %}
13448 
13449 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13450 %{
13451   match(Set cr (OverflowSubI zero op1));
13452 
13453   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13454   ins_cost(INSN_COST);
13455   ins_encode %{
13456     __ cmpw(zr, $op1$$Register);
13457   %}
13458 
13459   ins_pipe(icmp_reg_imm);
13460 %}
13461 
13462 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13463 %{
13464   match(Set cr (OverflowSubL zero op1));
13465 
13466   format %{ "cmp   zr, $op1\t# overflow check long" %}
13467   ins_cost(INSN_COST);
13468   ins_encode %{
13469     __ cmp(zr, $op1$$Register);
13470   %}
13471 
13472   ins_pipe(icmp_reg_imm);
13473 %}
13474 
// Int multiply overflow check. Widening smull produces the exact 64-bit
// product; subs against its own sign-extended low word yields NE iff the
// product does not fit in 32 bits, then the movw/cselw/cmpw sequence
// converts that NE into the V (overflow) flag for the consumer.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the overflow test feeds a branch directly: skip the
// flag-conversion dance and branch on NE/EQ instead of VS/VC.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check. mul gives the low 64 bits, smulh the high
// 64; the product fits iff the high word is the sign extension of the low
// word's top bit. As above, NE is then converted into the V flag.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the long-multiply overflow test feeds a branch directly.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13564 
13565 // ============================================================================
13566 // Compare Instructions
13567 
// Signed 32-bit compare, register-register: sets flags from op1 - op2.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed 32-bit compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed 32-bit compare against an immediate encodable directly in a
// cmpw (add/sub-class) instruction.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 32-bit compare against an arbitrary immediate; costs more
// because the constant may need materializing first (hence 2 * INSN_COST).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13623 
13624 // Unsigned compare Instructions; really, same as signed compare
13625 // except it should only be used to feed an If or a CMovI which takes a
13626 // cmpOpU.
13627 
// Unsigned 32-bit compare, register-register.  Emits the same cmpw as
// the signed form; only the flags register class (rFlagsRegU) differs,
// steering consumers to unsigned condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned 32-bit compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 32-bit compare against a directly encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned 32-bit compare against an arbitrary immediate (may need the
// constant materialized, hence the doubled cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13683 
// Signed 64-bit compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed 64-bit compare against zero.
// NOTE(review): the zero operand is declared as immI0 (an int zero)
// against a long CmpL; verify immL0 was not intended here.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare against a directly encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare against an arbitrary immediate (constant may
// need materializing, hence the doubled cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13739 
// Pointer compare, register-register (unsigned flags: pointers compare
// as unsigned values).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null-check: compare a pointer against the constant NULL.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null-check: compare a narrow oop against zero.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13795 
13796 // FP comparisons
13797 //
13798 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13799 // using normal cmpOp. See declaration of rFlagsReg for details.
13800 
// Single-precision FP compare, register-register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Single-precision FP compare against the constant 0.0 (fcmps has an
// immediate-zero form, so no register is needed for the constant).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double-precision FP compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double-precision FP compare against the constant 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13857 
// Three-way single-precision compare (CmpF3): dst = -1 / 0 / +1 for
// src1 < src2 (or unordered), ==, > respectively, via fcmps + two
// conditional selects — no branches needed.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // The previous version declared and bound an unused Label `done`
    // that nothing branched to; it has been removed as dead code.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13885 
// Three-way double-precision compare (CmpD3): dst = -1 / 0 / +1 for
// src1 < src2 (or unordered), ==, > respectively, via fcmpd + two
// conditional selects — no branches needed.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // The previous version declared and bound an unused Label `done`
    // that nothing branched to; it has been removed as dead code.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13912 
// Three-way single-precision compare against 0.0, using the
// immediate-zero form of fcmps.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // The previous version declared and bound an unused Label `done`
    // that nothing branched to; it has been removed as dead code.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13939 
// Three-way double-precision compare against 0.0, using the
// immediate-zero form of fcmpd.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // The previous version declared and bound an unused Label `done`
    // that nothing branched to; it has been removed as dead code.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13965 
// CmpLTMask: dst = (p < q) ? -1 : 0 (all-ones mask when less-than).
// cmpw sets LT, csetw produces 0/1, and the subtraction from zero
// turns 1 into -1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 replicates
// the sign bit, yielding -1 when src < 0 and 0 otherwise — one
// instruction and no flags clobbered.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14002 
14003 // ============================================================================
14004 // Max and Min
14005 
// Signed int minimum: compare then conditionally select src1 when
// src1 < src2 (LT), else src2.  size(8) = two 4-byte instructions.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum: same shape as minI but selects src1 on GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14056 
14057 // ============================================================================
14058 // Branch Instructions
14059 
14060 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
// Same as branchCon but consumes unsigned flags (rFlagsRegU) and so
// uses the unsigned condition-code encoding.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14116 
14117 // Make use of CBZ and CBNZ.  These instructions, as well as being
14118 // shorter than (cmp; branch), have the additional benefit of not
14119 // killing the flags.
14120 
// Int eq/ne compare with zero fused into a branch: cbzw/cbnzw avoid a
// separate cmp and do not touch the flags.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long eq/ne compare with zero fused into a branch (64-bit cbz/cbnz).
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check fused into a branch.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compressed-oop null-check fused into a branch (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null-check of a DecodeN'd oop fused into a branch: a narrow oop is
// zero iff its decoded form is NULL, so test the narrow register
// directly and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare with zero fused into a branch.  Besides eq/ne,
// gt/le are allowed because for unsigned x: x > 0 <=> x != 0 (cbnzw)
// and x <= 0 <=> x == 0 (cbzw); LS is the unsigned cmpcode for le.
instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            ||  n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned long compare with zero fused into a branch (same condition
// reasoning as cmpUI_imm0_branch, 64-bit registers).
// NOTE(review): this matches CmpU with iRegL/immL0 operands; confirm
// the intended ideal node (CmpU vs a long unsigned compare) for this
// HotSpot version.
instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14257 
14258 // Test bit and Branch
14259 
14260 // Patterns for short (< 32KiB) variants
// Long sign test (x < 0 / x >= 0) as a test of the sign bit (bit 63).
// lt maps to NE (bit set -> tbnz), ge to EQ (bit clear -> tbz); tbr
// dispatches to tbnz/tbz accordingly.  Short (< 32KiB) variant.
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int sign test: same as above but tests bit 31.
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Long single-bit test: (op1 & (1 << k)) ==/!= 0 becomes tbz/tbnz on
// bit k; the predicate requires the mask to be a power of two.
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int single-bit test: same as the long form, 32-bit mask.
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14334 
14335 // And far variants
// Far variant of cmpL_branch_sign: same bit-63 sign test, but tbr is
// told the target may be out of tbz/tbnz range (/*far*/true) so it can
// invert the test and emit an unconditional branch.
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_sign (bit 31).
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpL_branch_bit (power-of-two mask on a long).
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of cmpI_branch_bit (power-of-two mask on an int).
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14405 
14406 // Test bits
14407 
// (op1 & const) compared to 0: emit tst with an immediate when the
// constant is encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit form of the above (tstw, 32-bit logical immediate).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// (op1 & op2) compared to 0, mask in a register.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit register-register bit test.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14455 
14456 
14457 // Conditional Far Branch
14458 // Conditional Far Branch Unsigned
14459 // TODO: fixme
14460 
14461 // counted loop end branch near
// Counted-loop back-edge branch; uses the same conditional-branch
// encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Counted-loop back-edge branch, unsigned flags variant.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14494 
14495 // counted loop end branch far
14496 // counted loop end branch far unsigned
14497 // TODO: fixme
14498 
14499 // ============================================================================
14500 // inlined locking and unlocking
14501 
// Inlined monitor enter (FastLock): the flags result tells the caller
// whether the fast path succeeded; tmp/tmp2 are scratch registers
// clobbered by the aarch64_enc_fast_lock encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit (FastUnlock); mirrors cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14529 
14530 
14531 // ============================================================================
14532 // Safepoint Instructions
14533 
14534 // TODO
14535 // provide a near and far version of this code
14536 
// Safepoint poll: a load from the polling page (discarded into zr)
// that faults when the VM arms the page, with a poll_type relocation
// so the signal handler can identify it.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14549 
14550 
14551 // ============================================================================
14552 // Procedure Call/Return Instructions
14553 
14554 // Call Java Static Instruction
14555 
14556 instruct CallStaticJavaDirect(method meth)
14557 %{
14558   match(CallStaticJava);
14559 
14560   effect(USE meth);
14561 
14562   ins_cost(CALL_COST);
14563 
14564   format %{ "call,static $meth \t// ==> " %}
14565 
14566   ins_encode( aarch64_enc_java_static_call(meth),
14567               aarch64_enc_call_epilog );
14568 
14569   ins_pipe(pipe_class_call);
14570 %}
14571 
// TO HERE

// Call Java Dynamic Instruction
// Direct call through the dynamic (inline-cache) call encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14590 
// Call Runtime Instruction

// Call from compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14607 
// Call Runtime Instruction

// Call to a runtime leaf routine (no safepoint/stack walk expected);
// uses the same java_to_runtime encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14624 
// Call Runtime Instruction

// Call to a runtime leaf routine that does not touch floating point.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14641 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  // Note: only jump_target is passed to the encoding; method_oop is
  // carried in the inline-cache register by convention.
  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14658 
// Tail jump used for exception forwarding: br to jump_target with the
// exception oop pinned in r0 (iRegP_R0 ex_oop).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14671 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // size(0): purely a register-allocation artifact, emits nothing.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14689 
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14702 
14703 
14704 // Return Instruction
14705 // epilog node loads ret address into lr as part of frame pop
14706 instruct Ret()
14707 %{
14708   match(Return);
14709 
14710   format %{ "ret\t// return register" %}
14711 
14712   ins_encode( aarch64_enc_ret() );
14713 
14714   ins_pipe(pipe_branch);
14715 %}
14716 
// Die now.
// Emits a breakpoint (brk #999) so execution faults if an "impossible"
// path is ever taken.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14732 
14733 // ============================================================================
14734 // Partial Subtype Check
14735 //
14736 // superklass array for an instance of the superklass.  Set a hidden
14737 // internal cache on a hit (cache is checked with exposed code in
14738 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14739 // encoding ALSO sets flags.
14740 
14741 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
14742 %{
14743   match(Set result (PartialSubtypeCheck sub super));
14744   effect(KILL cr, KILL temp);
14745 
14746   ins_cost(1100);  // slightly larger than the next version
14747   format %{ "partialSubtypeCheck $result, $sub, $super" %}
14748 
14749   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
14750 
14751   opcode(0x1); // Force zero of result reg on hit
14752 
14753   ins_pipe(pipe_class_memory);
14754 %}
14755 
// Variant matched when the check result is only compared against zero;
// the flags (cr) carry the outcome, so result need not be zeroed on hit.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14770 
// Intrinsic String.compareTo for non-compact strings; counts arrive in
// bytes and are halved to char counts before calling string_compare.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt1$$Register, $cnt1$$Register, 1);
    __ asrw($cnt2$$Register, $cnt2$$Register, 1);
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14789 
// Intrinsic String.indexOf with a variable-length needle; -1 tells the
// macro-assembler helper the needle length is not a compile-time constant.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14808 
// Intrinsic String.indexOf with a small constant-length needle
// (immI_le_4 — presumably constrains the constant to <= 4; operand
// definition is outside this view).  The constant is passed to the
// helper and zr replaces the runtime cnt2 register.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14829 
// Intrinsic String.equals for non-compact strings; byte count is halved
// to a char count, then compared as 16-bit elements (elem size 2).
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     2, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
14847 
// Intrinsic Arrays.equals for byte arrays (LL encoding: both operands are
// 8-bit elements, elem size 1).  result receives the comparison outcome;
// tmp, the flags and the input registers are clobbered.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  // Fixed: "ary2" was missing its '$', so the operand name printed as
  // literal text in the debug format instead of the assigned register.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     1, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
14863 
// Intrinsic Arrays.equals for char arrays (UU encoding: both operands are
// 16-bit elements, elem size 2).  result receives the comparison outcome;
// tmp, the flags and the input registers are clobbered.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  // Fixed: "ary2" was missing its '$', so the operand name printed as
  // literal text in the debug format instead of the assigned register.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     2, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
14879 
14880 
14881 // encode char[] to byte[] in ISO_8859_1
14882 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
14883                           vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
14884                           vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
14885                           iRegI_R0 result, rFlagsReg cr)
14886 %{
14887   match(Set result (EncodeISOArray src (Binary dst len)));
14888   effect(USE_KILL src, USE_KILL dst, USE_KILL len,
14889          KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);
14890 
14891   format %{ "Encode array $src,$dst,$len -> $result" %}
14892   ins_encode %{
14893     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
14894          $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
14895          $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
14896   %}
14897   ins_pipe( pipe_class_memory );
14898 %}
14899 
14900 // ============================================================================
14901 // This name is KNOWN by the ADLC and cannot be changed.
14902 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14903 // for this guy.
14904 instruct tlsLoadP(thread_RegP dst)
14905 %{
14906   match(Set dst (ThreadLocal));
14907 
14908   ins_cost(0);
14909 
14910   format %{ " -- \t// $dst=Thread::current(), empty" %}
14911 
14912   size(0);
14913 
14914   ins_encode( /*empty*/ );
14915 
14916   ins_pipe(pipe_class_empty);
14917 %}
14918 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads/stores are selected on the node's memory_size():
// 4 bytes -> S-sized, 8 -> D-sized, 16 -> Q-sized FP/SIMD accesses.

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
14986 
// Replicate (splat) instructions: broadcast a scalar GP/FP register or an
// immediate into every lane.  vecD forms use 64-bit arrangements
// (T8B/T4H/T2S), vecX forms 128-bit arrangements (T16B/T8H/T4S/T2D).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate byte splat; the constant is masked to 8 bits.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate short splat; the constant is masked to 16 bits.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// NOTE(review): named replicate2L_zero but matches (ReplicateI zero) and
// the format says "(4I)" — the all-zero bit pattern is identical either
// way (dst is cleared via eor with itself); confirm the match rule is
// intentional against the matcher's use of this pattern.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15199 
// ====================REDUCTION ARITHMETIC====================================

// Reductions fold a vector (src2) into a scalar together with a scalar
// accumulator (src1).  Integer forms extract lanes with umov and combine
// in GP registers; FP forms combine lane-by-lane (ins + fadds/fmuls) to
// preserve strict left-to-right floating-point ordering.

// AddReductionVI, 2 lanes: extract both S lanes and add them to src1.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// AddReductionVI, 4 lanes: horizontal addv, then add lane 0 to src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// MulReductionVI, 2 lanes: multiply lanes into dst one at a time.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// MulReductionVI, 4 lanes: fold the upper half onto the lower half with
// ins+mulv, then extract and multiply the two remaining lanes.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// AddReductionVF, 2 lanes: sequential fadds keeps IEEE ordering.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// AddReductionVF, 4 lanes: lanes 1..3 are moved to lane 0 of tmp and
// accumulated one fadds at a time.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// MulReductionVF, 2 lanes.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// MulReductionVF, 4 lanes.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// AddReductionVD, 2 lanes.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// MulReductionVD, 2 lanes.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15426 
15427 // ====================VECTOR ARITHMETIC=======================================
15428 
15429 // --------------------------------- ADD --------------------------------------
15430 
15431 instruct vadd8B(vecD dst, vecD src1, vecD src2)
15432 %{
15433   predicate(n->as_Vector()->length() == 4 ||
15434             n->as_Vector()->length() == 8);
15435   match(Set dst (AddVB src1 src2));
15436   ins_cost(INSN_COST);
15437   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
15438   ins_encode %{
15439     __ addv(as_FloatRegister($dst$$reg), __ T8B,
15440             as_FloatRegister($src1$$reg),
15441             as_FloatRegister($src2$$reg));
15442   %}
15443   ins_pipe(vdop64);
15444 %}
15445 
// Integer vector add, 16 byte lanes, 128-bit vector (addv T16B).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15459 
// Integer vector add, short (16-bit) lanes, 64-bit vector (addv T4H).
// Length 2 also matches: a 2-short vector fits in a vecD register.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15474 
// Integer vector add, 8 short lanes, 128-bit vector (addv T8H).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15488 
// Integer vector add, 2 int lanes, 64-bit vector (addv T2S).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15502 
// Integer vector add, 4 int lanes, 128-bit vector (addv T4S).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15516 
// Integer vector add, 2 long lanes, 128-bit vector (addv T2D).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15530 
// Float vector add, 2 float lanes, 64-bit vector (fadd T2S).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
15544 
// Float vector add, 4 float lanes, 128-bit vector (fadd T4S).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15558 
// Double vector add, 2 double lanes, 128-bit vector (fadd T2D).
// Fix: add the length == 2 predicate for consistency with the sibling
// 2D instructs (vsub2D, vmul2D, vdiv2D), which all guard on it.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15571 
15572 // --------------------------------- SUB --------------------------------------
15573 
// Integer vector subtract, byte lanes, 64-bit vector (subv T8B).
// Length 4 also matches here since a 4B vector fits in a vecD register.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15588 
// Integer vector subtract, 16 byte lanes, 128-bit vector (subv T16B).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15602 
// Integer vector subtract, short lanes, 64-bit vector (subv T4H).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15617 
// Integer vector subtract, 8 short lanes, 128-bit vector (subv T8H).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15631 
// Integer vector subtract, 2 int lanes, 64-bit vector (subv T2S).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
15645 
// Integer vector subtract, 4 int lanes, 128-bit vector (subv T4S).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15659 
// Integer vector subtract, 2 long lanes, 128-bit vector (subv T2D).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15673 
// Float vector subtract, 2 float lanes, 64-bit vector (fsub T2S).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
15687 
// Float vector subtract, 4 float lanes, 128-bit vector (fsub T4S).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15701 
// Double vector subtract, 2 double lanes, 128-bit vector (fsub T2D).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15715 
15716 // --------------------------------- MUL --------------------------------------
15717 
// Integer vector multiply, short lanes, 64-bit vector (mulv T4H).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
15732 
// Integer vector multiply, 8 short lanes, 128-bit vector (mulv T8H).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
15746 
// Integer vector multiply, 2 int lanes, 64-bit vector (mulv T2S).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
15760 
// Integer vector multiply, 4 int lanes, 128-bit vector (mulv T4S).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
15774 
// Float vector multiply, 2 float lanes, 64-bit vector (fmul T2S).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
15788 
// Float vector multiply, 4 float lanes, 128-bit vector (fmul T4S).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15802 
// Double vector multiply, 2 double lanes, 128-bit vector (fmul T2D).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15816 
15817 // --------------------------------- MLA --------------------------------------
15818 
// Fused multiply-accumulate: dst += src1 * src2, short lanes (mlav T4H).
// Matches the AddVS-of-MulVS idiom with dst as the accumulator input.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
15833 
// Multiply-accumulate: dst += src1 * src2, 8 short lanes (mlav T8H).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
15847 
// Multiply-accumulate: dst += src1 * src2, 2 int lanes (mlav T2S).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
15861 
// Multiply-accumulate: dst += src1 * src2, 4 int lanes (mlav T4S).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
15875 
15876 // --------------------------------- MLS --------------------------------------
15877 
// Multiply-subtract: dst -= src1 * src2, short lanes (mlsv T4H).
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
15892 
// Multiply-subtract: dst -= src1 * src2, 8 short lanes (mlsv T8H).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
15906 
// Multiply-subtract: dst -= src1 * src2, 2 int lanes (mlsv T2S).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
15920 
// Multiply-subtract: dst -= src1 * src2, 4 int lanes (mlsv T4S).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
15934 
15935 // --------------------------------- DIV --------------------------------------
15936 
// Float vector divide, 2 float lanes, 64-bit vector (fdiv T2S).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
15950 
// Float vector divide, 4 float lanes, 128-bit vector (fdiv T4S).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15964 
// Double vector divide, 2 double lanes, 128-bit vector (fdiv T2D).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15978 
15979 // --------------------------------- SQRT -------------------------------------
15980 
// Vector square root, 2 double lanes, 128-bit vector (fsqrt T2D).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
15992 
15993 // --------------------------------- ABS --------------------------------------
15994 
// Vector absolute value, 2 float lanes, 64-bit vector (fabs T2S).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
16007 
// Vector absolute value, 4 float lanes, 128-bit vector (fabs T4S).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16020 
// Vector absolute value, 2 double lanes, 128-bit vector (fabs T2D).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16033 
16034 // --------------------------------- NEG --------------------------------------
16035 
// Vector negate, 2 float lanes, 64-bit vector (fneg T2S).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}
16048 
// Vector negate, 4 float lanes, 128-bit vector (fneg T4S).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16061 
// Vector negate, 2 double lanes, 128-bit vector (fneg T2D).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16074 
16075 // --------------------------------- AND --------------------------------------
16076 
// Bitwise AND of 64-bit vectors (andr T8B); AndV is element-type agnostic,
// so the predicate keys off length_in_bytes (4 or 8 fit in vecD).
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16091 
// Bitwise AND of 128-bit vectors (andr T16B).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16105 
16106 // --------------------------------- OR ---------------------------------------
16107 
// Bitwise OR of 64-bit vectors (orr T8B); predicate keys off
// length_in_bytes since OrV is element-type agnostic.
// Fix: format string said "and" but the emitted instruction is orr
// (compare vor16B below) — misleading in -XX:+PrintOptoAssembly output.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16122 
// Bitwise OR of 128-bit vectors (orr T16B).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16136 
16137 // --------------------------------- XOR --------------------------------------
16138 
// Bitwise XOR of 64-bit vectors (eor T8B).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16153 
// Bitwise XOR of 128-bit vectors (eor T16B).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16167 
16168 // ------------------------------ Shift ---------------------------------------
16169 
// Materialize a left-shift count: broadcast the GP register into every
// byte lane of a vecX (dup T16B) for use by the sshl/ushl instructs below.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16178 
16179 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Materialize a right-shift count: broadcast then negate every lane,
// so sshl/ushl with this register performs a right shift.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16189 
// Variable shift, byte lanes, 64-bit vector (sshl T8B).  Matches both
// LShiftVB and RShiftVB: right shift uses the negated count produced
// by vshiftcntR (see comment above vshiftcntR).
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16204 
// Variable shift, 16 byte lanes, 128-bit vector (sshl T16B);
// handles both left and arithmetic-right shift (negated count).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16218 
// Variable logical right shift, byte lanes, 64-bit vector:
// ushl with the negated count from vshiftcntR.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16232 
// Variable logical right shift, 16 byte lanes, 128-bit vector (ushl T16B).
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16245 
// Immediate left shift, byte lanes, 64-bit vector.  Java masks the count
// to 0..31; shifting a byte by >= 8 must yield 0, which is synthesized
// here as eor(src, src) (XOR with self zeroes the register).
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16265 
// Immediate left shift, 16 byte lanes, 128-bit vector.
// Shift count >= 8 yields all-zero lanes, emitted as eor(src, src).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16284 
// Immediate arithmetic right shift, byte lanes, 64-bit vector.
// Counts >= 8 are clamped to 7 (arithmetic shift saturates at width-1,
// replicating the sign bit).  NOTE(review): `-sh & 7` appears to produce
// the encoded shift amount this assembler's sshr expects — confirm
// against the assembler_aarch64 sshr definition.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16300 
// Immediate arithmetic right shift, 16 byte lanes, 128-bit vector.
// Counts >= 8 clamp to 7; `-sh & 7` yields the encoded amount
// (NOTE(review): confirm against the assembler's sshr encoding).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16315 
// Immediate logical right shift, byte lanes, 64-bit vector.
// Counts >= 8 produce zero (eor with self); otherwise ushr is emitted
// with `-sh & 7` (NOTE(review): encoded shift amount — confirm against
// the assembler's ushr definition).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16335 
// Immediate logical right shift, 16 byte lanes, 128-bit vector.
// Counts >= 8 produce zero via eor(src, src); otherwise ushr with the
// encoded amount `-sh & 7`.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16354 
// Variable shift, short lanes, 64-bit vector (sshl T4H); matches both
// left and arithmetic-right shift (right uses vshiftcntR's negated count).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16369 
// Variable (register) shift of a 128-bit vector of 8 shorts.  SSHL
// handles both directions: positive lane counts shift left, negative
// counts shift arithmetically right, so one rule matches both
// LShiftVS and RShiftVS (counts presumably pre-negated for right
// shifts elsewhere -- not visible in this excerpt).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16383 
// Variable (register) logical right shift of a 64-bit vector of shorts.
// USHL with negative lane counts performs the logical right shift; the
// count vector is presumably negated by a separate shift-count rule
// (outside this excerpt) -- verify there.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16397 
// Variable (register) logical right shift of a 128-bit vector of 8
// shorts, via USHL with negative lane counts (counts presumably
// pre-negated by the shift-count rule elsewhere -- not visible here).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16410 
// Left shift of a 64-bit vector of shorts by a constant.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Mask the count to 0..31, matching Java's int shift semantics.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // A left shift of 16..31 leaves nothing in the low 16 bits of the
      // (int-promoted) result, so the short lanes become zero.  Lane
      // arrangement is irrelevant for zeroing, hence T8B here.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // SHL takes the shift amount directly (no negated encoding).
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16430 
// Left shift of a 128-bit vector of 8 shorts by a constant.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    // Mask the count to 0..31, matching Java's int shift semantics.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Shifting left by 16 or more clears every short lane; zero the
      // destination with src EOR src (arrangement immaterial for zero).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // SHL takes the shift amount directly (no negated encoding).
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16449 
// Arithmetic right shift of a 64-bit vector of shorts by a constant.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Mask to Java int-shift range, then saturate at 15: an arithmetic
    // shift past the lane width just replicates the sign bit, so 15 is
    // equivalent for any larger count.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    // Negate-and-mask to form the immh:immb right-shift field, which
    // hardware decodes as (2*esize - field).  NOTE(review): sh == 0
    // encodes a 16-bit shift rather than a no-op -- presumably C2 folds
    // shift-by-zero before matching; confirm.
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16465 
// Arithmetic right shift of a 128-bit vector of 8 shorts by a constant.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    // Mask to Java int-shift range; saturate at 15 since an arithmetic
    // shift beyond the lane width only replicates the sign bit.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    // Negated immh:immb encoding for right shifts.  NOTE(review):
    // sh == 0 would encode a 16-bit shift, not a no-op -- presumably
    // unreachable after C2 constant folding; confirm.
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16480 
// Logical (unsigned) right shift of a 64-bit vector of shorts by a
// constant.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Mask the count to 0..31, matching Java's int shift semantics.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Logical right shift by >= 16 zeroes every short lane; clear the
      // destination with src EOR src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // -sh & 15 forms the negated immh:immb right-shift field.
      // NOTE(review): sh == 0 encodes a 16-bit shift, not a no-op --
      // presumably C2 folds shift-by-zero first; confirm.
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16500 
// Logical (unsigned) right shift of a 128-bit vector of 8 shorts by a
// constant.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    // Mask the count to 0..31, matching Java's int shift semantics.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Logical right shift by >= 16 zeroes every short lane.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // Negated immh:immb encoding for right shifts.  NOTE(review):
      // sh == 0 would encode a 16-bit shift, not a no-op; presumably
      // unreachable after C2 folding -- confirm.
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16519 
// Variable (register) shift of a 64-bit vector of 2 ints.  SSHL shifts
// left for positive lane counts and arithmetically right for negative
// ones, so the one rule matches both LShiftVI and RShiftVI (counts
// presumably pre-negated for right shifts elsewhere -- not visible in
// this excerpt).
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16533 
// Variable (register) shift of a 128-bit vector of 4 ints; SSHL covers
// both shift directions (negative lane counts shift right), so the rule
// matches LShiftVI and RShiftVI alike.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16547 
// Variable (register) logical right shift of a 64-bit vector of 2 ints,
// via USHL with negative lane counts (counts presumably pre-negated by
// the shift-count rule elsewhere -- not visible here).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16560 
// Variable (register) logical right shift of a 128-bit vector of 4
// ints, via USHL with negative lane counts (counts presumably
// pre-negated elsewhere -- not visible here).
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16573 
// Left shift of a 64-bit vector of 2 ints by a constant.  The & 31
// mask matches Java's int shift semantics and exactly spans the 32-bit
// lane, so no zeroing special case is needed.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    // SHL takes the shift amount directly (no negated encoding).
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16586 
// Left shift of a 128-bit vector of 4 ints by a constant.  The & 31
// mask (Java int shift semantics) exactly spans the 32-bit lane, so no
// zeroing special case is needed.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    // SHL takes the shift amount directly (no negated encoding).
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16599 
// Arithmetic right shift of a 64-bit vector of 2 ints by a constant.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    // Negate-and-mask to form the immh:immb right-shift field (decoded
    // by hardware as 2*esize - field); & 31 matches Java int semantics.
    // NOTE(review): a masked count of 0 encodes a 32-bit shift, not a
    // no-op -- presumably C2 folds shift-by-zero before matching.
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16612 
// Arithmetic right shift of a 128-bit vector of 4 ints by a constant.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    // Negated immh:immb right-shift encoding; & 31 matches Java int
    // shift semantics.  NOTE(review): a masked count of 0 encodes a
    // 32-bit shift, not a no-op -- presumably unreachable; confirm.
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16625 
// Logical (unsigned) right shift of a 64-bit vector of 2 ints by a
// constant.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    // Negated immh:immb right-shift encoding; & 31 matches Java int
    // shift semantics.  NOTE(review): a masked count of 0 encodes a
    // 32-bit shift (lane becomes zero), not a no-op -- presumably C2
    // folds shift-by-zero before matching; confirm.
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16638 
// Logical (unsigned) right shift of a 128-bit vector of 4 ints by a
// constant.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    // Negated immh:immb right-shift encoding; & 31 matches Java int
    // shift semantics.  NOTE(review): a masked count of 0 encodes a
    // 32-bit shift, not a no-op -- presumably unreachable; confirm.
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16651 
// Variable (register) shift of a 128-bit vector of 2 longs.  SSHL
// shifts left for positive lane counts and arithmetically right for
// negative ones, so the rule matches both LShiftVL and RShiftVL
// (counts presumably pre-negated for right shifts elsewhere -- not
// visible in this excerpt).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16665 
// Variable (register) logical right shift of a 128-bit vector of 2
// longs, via USHL with negative lane counts (counts presumably
// pre-negated elsewhere -- not visible here).
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16678 
// Left shift of a 128-bit vector of 2 longs by a constant.  The & 63
// mask matches Java's long shift semantics and exactly spans the
// 64-bit lane, so no zeroing special case is needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // SHL takes the shift amount directly (no negated encoding).
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16691 
// Arithmetic right shift of a 128-bit vector of 2 longs by a constant.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // Negated immh:immb right-shift encoding; & 63 matches Java long
    // shift semantics.  NOTE(review): a masked count of 0 encodes a
    // 64-bit shift, not a no-op -- presumably C2 folds shift-by-zero
    // before matching; confirm.
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16704 
// Logical (unsigned) right shift of a 128-bit vector of 2 longs by a
// constant.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // Negated immh:immb right-shift encoding; & 63 matches Java long
    // shift semantics.  NOTE(review): a masked count of 0 encodes a
    // 64-bit shift (lane becomes zero), not a no-op -- presumably
    // unreachable after C2 folding; confirm.
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16717 
16718 //----------PEEPHOLE RULES-----------------------------------------------------
16719 // These must follow all instruction definitions as they use the names
16720 // defined in the instructions definitions.
16721 //
16722 // peepmatch ( root_instr_name [preceding_instruction]* );
16723 //
16724 // peepconstraint %{
16725 // (instruction_number.operand_name relational_op instruction_number.operand_name
16726 //  [, ...] );
16727 // // instruction numbers are zero-based using left to right order in peepmatch
16728 //
16729 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16730 // // provide an instruction_number.operand_name for each operand that appears
16731 // // in the replacement instruction's match rule
16732 //
16733 // ---------VM FLAGS---------------------------------------------------------
16734 //
16735 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16736 //
16737 // Each peephole rule is given an identifying number starting with zero and
16738 // increasing by one in the order seen by the parser.  An individual peephole
16739 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16740 // on the command-line.
16741 //
16742 // ---------CURRENT LIMITATIONS----------------------------------------------
16743 //
16744 // Only match adjacent instructions in same basic block
16745 // Only equality constraints
16746 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16747 // Only one replacement instruction
16748 //
16749 // ---------EXAMPLE----------------------------------------------------------
16750 //
16751 // // pertinent parts of existing instructions in architecture description
16752 // instruct movI(iRegINoSp dst, iRegI src)
16753 // %{
16754 //   match(Set dst (CopyI src));
16755 // %}
16756 //
16757 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16758 // %{
16759 //   match(Set dst (AddI dst src));
16760 //   effect(KILL cr);
16761 // %}
16762 //
16763 // // Change (inc mov) to lea
16764 // peephole %{
//   // increment preceded by register-register move
16766 //   peepmatch ( incI_iReg movI );
16767 //   // require that the destination register of the increment
16768 //   // match the destination register of the move
16769 //   peepconstraint ( 0.dst == 1.dst );
16770 //   // construct a replacement instruction that sets
16771 //   // the destination to ( move's source register + one )
16772 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16773 // %}
16774 //
16775 
16776 // Implementation no longer uses movX instructions since
16777 // machine-independent system no longer uses CopyX nodes.
16778 //
16779 // peephole
16780 // %{
16781 //   peepmatch (incI_iReg movI);
16782 //   peepconstraint (0.dst == 1.dst);
16783 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16784 // %}
16785 
16786 // peephole
16787 // %{
16788 //   peepmatch (decI_iReg movI);
16789 //   peepconstraint (0.dst == 1.dst);
16790 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16791 // %}
16792 
16793 // peephole
16794 // %{
16795 //   peepmatch (addI_iReg_imm movI);
16796 //   peepconstraint (0.dst == 1.dst);
16797 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16798 // %}
16799 
16800 // peephole
16801 // %{
16802 //   peepmatch (incL_iReg movL);
16803 //   peepconstraint (0.dst == 1.dst);
16804 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16805 // %}
16806 
16807 // peephole
16808 // %{
16809 //   peepmatch (decL_iReg movL);
16810 //   peepconstraint (0.dst == 1.dst);
16811 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16812 // %}
16813 
16814 // peephole
16815 // %{
16816 //   peepmatch (addL_iReg_imm movL);
16817 //   peepconstraint (0.dst == 1.dst);
16818 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16819 // %}
16820 
16821 // peephole
16822 // %{
16823 //   peepmatch (addP_iReg_imm movP);
16824 //   peepconstraint (0.dst == 1.dst);
16825 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16826 // %}
16827 
16828 // // Change load of spilled value to only a spill
16829 // instruct storeI(memory mem, iRegI src)
16830 // %{
16831 //   match(Set mem (StoreI mem src));
16832 // %}
16833 //
16834 // instruct loadI(iRegINoSp dst, memory mem)
16835 // %{
16836 //   match(Set dst (LoadI mem));
16837 // %}
16838 //
16839 
16840 //----------SMARTSPILL RULES---------------------------------------------------
16841 // These must follow all instruction definitions as they use the names
16842 // defined in the instructions definitions.
16843 
16844 // Local Variables:
16845 // mode: c++
16846 // End: