1 //
   2 // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
// AArch64 has 32 floating-point registers. Each can store a vector of
// single or double precision floating-point values up to 8 * 32
// floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
// use the first float or double element of the vector.

// For Java use, float registers v0-v15 are always save-on-call
// (whereas the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.

// Each V register is carved into four 32-bit slices for the
// allocator: Vn is bits 0-31, Vn_H bits 32-63, Vn_J bits 64-95 and
// Vn_K bits 96-127 (hence the ->next(), ->next(2), ->next(3) chain).

  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CSPR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation order: volatile temporaries first,
// then argument registers, then C-callee-saved registers, with the
// non-allocatable system registers last (see priority comment above).
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Float register allocation order: no-save registers v16-v31 first,
// then argument registers v0-v7, then the ABI-callee-saved v8-v15.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
    // R31 (sp) deliberately omitted
);

// Singleton register classes: used by rules that must pin an
// operand to one specific machine register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP); each entry
// names the 64-bit register as a low/high 32-bit pair.
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers
// NOTE(review): the _no_fp and _with_fp variants below currently list
// exactly the same registers (R29/fp is excluded from both), so the
// reg_class_dynamic selection on PreserveFramePointer is a no-op.
// Confirm whether the _no_fp variant was meant to include R29.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Variant selected when PreserveFramePointer is set (see
// reg_class_dynamic below).
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers
// NOTE(review): as with the 32-bit variants above, the _no_fp and
// _with_fp lists are currently identical (R29/fp excluded from both),
// making the PreserveFramePointer selection a no-op - confirm intent.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Variant selected when PreserveFramePointer is set (see
// reg_class_dynamic below).
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64-bit register classes (each is a low/high 32-bit pair)
// for rules that require one specific register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (includes the system registers,
// unlike no_special_ptr_reg below)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all non_special pointer registers
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers (single-precision: only the low
// 32-bit slice of each V register)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers
// A 64-bit vector uses the same two 32-bit slots per register as a
// double (Vn, Vn_H), so this class mirrors double_reg.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// A 128-bit vector occupies four 32-bit slots per register
// (Vn, Vn_H, Vn_J, Vn_K).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slots (64 bits of allocator state) are
// listed here despite the "128 bit" description — confirm this matches
// how consumers of this class use v0.
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): tracks only V1/V1_H slots — see note on v0_reg.
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): tracks only V2/V2_H slots — see note on v0_reg.
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): tracks only V3/V3_H slots — see note on v0_reg.
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the RFLAGS pseudo-register used
// by the matcher to represent the processor condition flags)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls are ranked twice as expensive as a register op
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references carry an order-of-magnitude penalty to steer
  // the matcher away from them where cheaper alternatives exist
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
// Trampoline-stub accounting used by Compile::shorten_branches.
// AArch64 does not emit call trampoline stubs, so both queries are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
// Sizes and emitters for the method exception and deoptimization
// handler stubs.
class HandlerImpl {

 public:

  // emitters for the exception and deopt handler stubs; bodies are
  // defined outside this header block
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): presumably the far branch can be up to 3
    // instructions, giving the 4 counted here — confirm against
    // emit_deopt_handler.
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers used by the volatile load/store and CAS
  // matching predicates below

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // walk between the leading and trailing membars of a volatile
  // put/CAS subgraph (and from a card mark membar back to its leader)
  MemBarNode *leading_to_trailing(MemBarNode *leading);
  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1064 %}
1065 
1066 source %{
1067 
1068   // Optimizaton of volatile gets and puts
1069   // -------------------------------------
1070   //
1071   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1072   // use to implement volatile reads and writes. For a volatile read
1073   // we simply need
1074   //
1075   //   ldar<x>
1076   //
1077   // and for a volatile write we need
1078   //
1079   //   stlr<x>
1080   //
1081   // Alternatively, we can implement them by pairing a normal
1082   // load/store with a memory barrier. For a volatile read we need
1083   //
1084   //   ldr<x>
1085   //   dmb ishld
1086   //
1087   // for a volatile write
1088   //
1089   //   dmb ish
1090   //   str<x>
1091   //   dmb ish
1092   //
1093   // We can also use ldaxr and stlxr to implement compare and swap CAS
1094   // sequences. These are normally translated to an instruction
1095   // sequence like the following
1096   //
1097   //   dmb      ish
1098   // retry:
1099   //   ldxr<x>   rval raddr
1100   //   cmp       rval rold
1101   //   b.ne done
1102   //   stlxr<x>  rval, rnew, rold
1103   //   cbnz      rval retry
1104   // done:
1105   //   cset      r0, eq
1106   //   dmb ishld
1107   //
1108   // Note that the exclusive store is already using an stlxr
1109   // instruction. That is required to ensure visibility to other
1110   // threads of the exclusive write (assuming it succeeds) before that
1111   // of any subsequent writes.
1112   //
1113   // The following instruction sequence is an improvement on the above
1114   //
1115   // retry:
1116   //   ldaxr<x>  rval raddr
1117   //   cmp       rval rold
1118   //   b.ne done
1119   //   stlxr<x>  rval, rnew, rold
1120   //   cbnz      rval retry
1121   // done:
1122   //   cset      r0, eq
1123   //
1124   // We don't need the leading dmb ish since the stlxr guarantees
1125   // visibility of prior writes in the case that the swap is
1126   // successful. Crucially we don't have to worry about the case where
1127   // the swap is not successful since no valid program should be
1128   // relying on visibility of prior changes by the attempting thread
1129   // in the case where the CAS fails.
1130   //
1131   // Similarly, we don't need the trailing dmb ishld if we substitute
1132   // an ldaxr instruction since that will provide all the guarantees we
1133   // require regarding observation of changes made by other threads
1134   // before any change to the CAS address observed by the load.
1135   //
1136   // In order to generate the desired instruction sequence we need to
1137   // be able to identify specific 'signature' ideal graph node
1138   // sequences which i) occur as a translation of a volatile reads or
1139   // writes or CAS operations and ii) do not occur through any other
1140   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1142   // sequences to the desired machine code sequences. Selection of the
1143   // alternative rules can be implemented by predicates which identify
1144   // the relevant node sequences.
1145   //
1146   // The ideal graph generator translates a volatile read to the node
1147   // sequence
1148   //
1149   //   LoadX[mo_acquire]
1150   //   MemBarAcquire
1151   //
1152   // As a special case when using the compressed oops optimization we
1153   // may also see this variant
1154   //
1155   //   LoadN[mo_acquire]
1156   //   DecodeN
1157   //   MemBarAcquire
1158   //
1159   // A volatile write is translated to the node sequence
1160   //
1161   //   MemBarRelease
1162   //   StoreX[mo_release] {CardMark}-optional
1163   //   MemBarVolatile
1164   //
1165   // n.b. the above node patterns are generated with a strict
1166   // 'signature' configuration of input and output dependencies (see
1167   // the predicates below for exact details). The card mark may be as
1168   // simple as a few extra nodes or, in a few GC configurations, may
1169   // include more complex control flow between the leading and
1170   // trailing memory barriers. However, whatever the card mark
1171   // configuration these signatures are unique to translated volatile
1172   // reads/stores -- they will not appear as a result of any other
1173   // bytecode translation or inlining nor as a consequence of
1174   // optimizing transforms.
1175   //
1176   // We also want to catch inlined unsafe volatile gets and puts and
1177   // be able to implement them using either ldar<x>/stlr<x> or some
1178   // combination of ldr<x>/stlr<x> and dmb instructions.
1179   //
1180   // Inlined unsafe volatiles puts manifest as a minor variant of the
1181   // normal volatile put node sequence containing an extra cpuorder
1182   // membar
1183   //
1184   //   MemBarRelease
1185   //   MemBarCPUOrder
1186   //   StoreX[mo_release] {CardMark}-optional
1187   //   MemBarVolatile
1188   //
1189   // n.b. as an aside, the cpuorder membar is not itself subject to
1190   // matching and translation by adlc rules.  However, the rule
1191   // predicates need to detect its presence in order to correctly
1192   // select the desired adlc rules.
1193   //
1194   // Inlined unsafe volatile gets manifest as a somewhat different
1195   // node sequence to a normal volatile get
1196   //
1197   //   MemBarCPUOrder
1198   //        ||       \\
1199   //   MemBarAcquire LoadX[mo_acquire]
1200   //        ||
1201   //   MemBarCPUOrder
1202   //
1203   // In this case the acquire membar does not directly depend on the
1204   // load. However, we can be sure that the load is generated from an
1205   // inlined unsafe volatile get if we see it dependent on this unique
1206   // sequence of membar nodes. Similarly, given an acquire membar we
1207   // can know that it was added because of an inlined unsafe volatile
1208   // get if it is fed and feeds a cpuorder membar and if its feed
1209   // membar also feeds an acquiring load.
1210   //
1211   // Finally an inlined (Unsafe) CAS operation is translated to the
1212   // following ideal graph
1213   //
1214   //   MemBarRelease
1215   //   MemBarCPUOrder
1216   //   CompareAndSwapX {CardMark}-optional
1217   //   MemBarCPUOrder
1218   //   MemBarAcquire
1219   //
1220   // So, where we can identify these volatile read and write
1221   // signatures we can choose to plant either of the above two code
1222   // sequences. For a volatile read we can simply plant a normal
1223   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1224   // also choose to inhibit translation of the MemBarAcquire and
1225   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1226   //
1227   // When we recognise a volatile store signature we can choose to
1228   // plant at a dmb ish as a translation for the MemBarRelease, a
1229   // normal str<x> and then a dmb ish for the MemBarVolatile.
1230   // Alternatively, we can inhibit translation of the MemBarRelease
1231   // and MemBarVolatile and instead plant a simple stlr<x>
1232   // instruction.
1233   //
1234   // when we recognise a CAS signature we can choose to plant a dmb
1235   // ish as a translation for the MemBarRelease, the conventional
1236   // macro-instruction sequence for the CompareAndSwap node (which
1237   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1238   // Alternatively, we can elide generation of the dmb instructions
1239   // and plant the alternative CompareAndSwap macro-instruction
1240   // sequence (which uses ldaxr<x>).
1241   //
1242   // Of course, the above only applies when we see these signature
1243   // configurations. We still want to plant dmb instructions in any
1244   // other cases where we may see a MemBarAcquire, MemBarRelease or
1245   // MemBarVolatile. For example, at the end of a constructor which
1246   // writes final/volatile fields we will see a MemBarRelease
1247   // instruction and this needs a 'dmb ish' lest we risk the
1248   // constructed object being visible without making the
1249   // final/volatile field writes visible.
1250   //
1251   // n.b. the translation rules below which rely on detection of the
1252   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1253   // If we see anything other than the signature configurations we
1254   // always just translate the loads and stores to ldr<x> and str<x>
1255   // and translate acquire, release and volatile membars to the
1256   // relevant dmb instructions.
1257   //
1258 
1259   // graph traversal helpers used for volatile put/get and CAS
1260   // optimization
1261 
1262   // 1) general purpose helpers
1263 
1264   // if node n is linked to a parent MemBarNode by an intervening
1265   // Control and Memory ProjNode return the MemBarNode otherwise return
1266   // NULL.
1267   //
1268   // n may only be a Load or a MemBar.
1269 
1270   MemBarNode *parent_membar(const Node *n)
1271   {
1272     Node *ctl = NULL;
1273     Node *mem = NULL;
1274     Node *membar = NULL;
1275 
1276     if (n->is_Load()) {
1277       ctl = n->lookup(LoadNode::Control);
1278       mem = n->lookup(LoadNode::Memory);
1279     } else if (n->is_MemBar()) {
1280       ctl = n->lookup(TypeFunc::Control);
1281       mem = n->lookup(TypeFunc::Memory);
1282     } else {
1283         return NULL;
1284     }
1285 
1286     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1287       return NULL;
1288     }
1289 
1290     membar = ctl->lookup(0);
1291 
1292     if (!membar || !membar->is_MemBar()) {
1293       return NULL;
1294     }
1295 
1296     if (mem->lookup(0) != membar) {
1297       return NULL;
1298     }
1299 
1300     return membar->as_MemBar();
1301   }
1302 
1303   // if n is linked to a child MemBarNode by intervening Control and
1304   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1305 
1306   MemBarNode *child_membar(const MemBarNode *n)
1307   {
1308     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1309     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1310 
1311     // MemBar needs to have both a Ctl and Mem projection
1312     if (! ctl || ! mem)
1313       return NULL;
1314 
1315     MemBarNode *child = NULL;
1316     Node *x;
1317 
1318     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1319       x = ctl->fast_out(i);
1320       // if we see a membar we keep hold of it. we may also see a new
1321       // arena copy of the original but it will appear later
1322       if (x->is_MemBar()) {
1323           child = x->as_MemBar();
1324           break;
1325       }
1326     }
1327 
1328     if (child == NULL) {
1329       return NULL;
1330     }
1331 
1332     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1333       x = mem->fast_out(i);
1334       // if we see a membar we keep hold of it. we may also see a new
1335       // arena copy of the original but it will appear later
1336       if (x == child) {
1337         return child;
1338       }
1339     }
1340     return NULL;
1341   }
1342 
1343   // helper predicate use to filter candidates for a leading memory
1344   // barrier
1345   //
1346   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1347   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1348 
1349   bool leading_membar(const MemBarNode *barrier)
1350   {
1351     int opcode = barrier->Opcode();
1352     // if this is a release membar we are ok
1353     if (opcode == Op_MemBarRelease) {
1354       return true;
1355     }
1356     // if its a cpuorder membar . . .
1357     if (opcode != Op_MemBarCPUOrder) {
1358       return false;
1359     }
1360     // then the parent has to be a release membar
1361     MemBarNode *parent = parent_membar(barrier);
1362     if (!parent) {
1363       return false;
1364     }
1365     opcode = parent->Opcode();
1366     return opcode == Op_MemBarRelease;
1367   }
1368 
1369   // 2) card mark detection helper
1370 
1371   // helper predicate which can be used to detect a volatile membar
1372   // introduced as part of a conditional card mark sequence either by
1373   // G1 or by CMS when UseCondCardMark is true.
1374   //
1375   // membar can be definitively determined to be part of a card mark
1376   // sequence if and only if all the following hold
1377   //
1378   // i) it is a MemBarVolatile
1379   //
1380   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1381   // true
1382   //
1383   // iii) the node's Mem projection feeds a StoreCM node.
1384 
1385   bool is_card_mark_membar(const MemBarNode *barrier)
1386   {
1387     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1388       return false;
1389     }
1390 
1391     if (barrier->Opcode() != Op_MemBarVolatile) {
1392       return false;
1393     }
1394 
1395     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1396 
1397     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1398       Node *y = mem->fast_out(i);
1399       if (y->Opcode() == Op_StoreCM) {
1400         return true;
1401       }
1402     }
1403 
1404     return false;
1405   }
1406 
1407 
1408   // 3) helper predicates to traverse volatile put or CAS graphs which
1409   // may contain GC barrier subgraphs
1410 
1411   // Preamble
1412   // --------
1413   //
1414   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1416   // leading MemBarRelease and a trailing MemBarVolatile as follows
1417   //
1418   //   MemBarRelease
1419   //  {    ||        } -- optional
1420   //  {MemBarCPUOrder}
1421   //       ||       \\
1422   //       ||     StoreX[mo_release]
1423   //       | \ Bot    / ???
1424   //       | MergeMem
1425   //       | /
1426   //   MemBarVolatile
1427   //
1428   // where
1429   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1430   //  | \ and / indicate further routing of the Ctl and Mem feeds
1431   //
1432   // Note that the memory feed from the CPUOrder membar to the
1433   // MergeMem node is an AliasIdxBot slice while the feed from the
1434   // StoreX is for a slice determined by the type of value being
1435   // written.
1436   //
1437   // the diagram above shows the graph we see for non-object stores.
1438   // for a volatile Object store (StoreN/P) we may see other nodes
1439   // below the leading membar because of the need for a GC pre- or
1440   // post-write barrier.
1441   //
  // with most GC configurations we will see this simple variant which
1443   // includes a post-write barrier card mark.
1444   //
1445   //   MemBarRelease______________________________
1446   //         ||    \\               Ctl \        \\
1447   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1448   //         | \ Bot  / oop                 . . .  /
1449   //         | MergeMem
1450   //         | /
1451   //         ||      /
1452   //   MemBarVolatile
1453   //
1454   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1455   // the object address to an int used to compute the card offset) and
1456   // Ctl+Mem to a StoreB node (which does the actual card mark).
1457   //
1458   // n.b. a StoreCM node is only ever used when CMS (with or without
1459   // CondCardMark) or G1 is configured. This abstract instruction
1460   // differs from a normal card mark write (StoreB) because it implies
1461   // a requirement to order visibility of the card mark (StoreCM)
1462   // after that of the object put (StoreP/N) using a StoreStore memory
1463   // barrier. Note that this is /not/ a requirement to order the
1464   // instructions in the generated code (that is already guaranteed by
1465   // the order of memory dependencies). Rather it is a requirement to
1466   // ensure visibility order which only applies on architectures like
1467   // AArch64 which do not implement TSO. This ordering is required for
1468   // both non-volatile and volatile puts.
1469   //
1470   // That implies that we need to translate a StoreCM using the
1471   // sequence
1472   //
1473   //   dmb ishst
1474   //   stlrb
1475   //
1476   // This dmb cannot be omitted even when the associated StoreX or
1477   // CompareAndSwapX is implemented using stlr. However, as described
1478   // below there are circumstances where a specific GC configuration
1479   // requires a stronger barrier in which case it can be omitted.
1480   // 
1481   // With the Serial or Parallel GC using +CondCardMark the card mark
1482   // is performed conditionally on it currently being unmarked in
1483   // which case the volatile put graph looks slightly different
1484   //
1485   //   MemBarRelease____________________________________________
1486   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1487   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1488   //         | \ Bot / oop                          \            |
1489   //         | MergeMem                            . . .      StoreB
1490   //         | /                                                /
1491   //         ||     /
1492   //   MemBarVolatile
1493   //
1494   // It is worth noting at this stage that all the above
1495   // configurations can be uniquely identified by checking that the
1496   // memory flow includes the following subgraph:
1497   //
1498   //   MemBarRelease
1499   //  {MemBarCPUOrder}
1500   //      |  \      . . .
1501   //      |  StoreX[mo_release]  . . .
1502   //  Bot |   / oop
1503   //     MergeMem
1504   //      |
1505   //   MemBarVolatile
1506   //
1507   // This is referred to as a *normal* volatile store subgraph. It can
1508   // easily be detected starting from any candidate MemBarRelease,
1509   // StoreX[mo_release] or MemBarVolatile node.
1510   //
1511   // A small variation on this normal case occurs for an unsafe CAS
1512   // operation. The basic memory flow subgraph for a non-object CAS is
1513   // as follows
1514   //
1515   //   MemBarRelease
1516   //         ||
1517   //   MemBarCPUOrder
1518   //          |     \\   . . .
1519   //          |     CompareAndSwapX
1520   //          |       |
1521   //      Bot |     SCMemProj
1522   //           \     / Bot
1523   //           MergeMem
1524   //           /
1525   //   MemBarCPUOrder
1526   //         ||
1527   //   MemBarAcquire
1528   //
1529   // The same basic variations on this arrangement (mutatis mutandis)
1530   // occur when a card mark is introduced. i.e. the CPUOrder MemBar
1531   // feeds the extra CastP2X, LoadB etc nodes but the above memory
1532   // flow subgraph is still present.
1533   // 
1534   // This is referred to as a *normal* CAS subgraph. It can easily be
1535   // detected starting from any candidate MemBarRelease,
1536   // StoreX[mo_release] or MemBarAcquire node.
1537   //
1538   // The code below uses two helper predicates, leading_to_trailing
1539   // and trailing_to_leading to identify these normal graphs, one
1540   // validating the layout starting from the top membar and searching
1541   // down and the other validating the layout starting from the lower
1542   // membar and searching up.
1543   //
1544   // There are two special case GC configurations when the simple
1545   // normal graphs above may not be generated: when using G1 (which
1546   // always employs a conditional card mark); and when using CMS with
1547   // conditional card marking (+CondCardMark) configured. These GCs
1548   // are both concurrent rather than stop-the world GCs. So they
1549   // introduce extra Ctl+Mem flow into the graph between the leading
1550   // and trailing membar nodes, in particular enforcing stronger
  // memory serialisation between the object put and the corresponding
1552   // conditional card mark. CMS employs a post-write GC barrier while
1553   // G1 employs both a pre- and post-write GC barrier.
1554   //
1555   // The post-write barrier subgraph for these configurations includes
1556   // a MemBarVolatile node -- referred to as a card mark membar --
1557   // which is needed to order the card write (StoreCM) operation in
1558   // the barrier, the preceding StoreX (or CompareAndSwapX) and Store
1559   // operations performed by GC threads i.e. a card mark membar
1560   // constitutes a StoreLoad barrier hence must be translated to a dmb
1561   // ish (whether or not it sits inside a volatile store sequence).
1562   //
1563   // Of course, the use of the dmb ish for the card mark membar also
  // implies that the StoreCM which follows can omit the dmb ishst
1565   // instruction. The necessary visibility ordering will already be
1566   // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
  // needs to be generated as part of the StoreCM sequence with GC
1568   // configuration +CMS -CondCardMark.
1569   // 
1570   // Of course all these extra barrier nodes may well be absent --
1571   // they are only inserted for object puts. Their potential presence
1572   // significantly complicates the task of identifying whether a
1573   // MemBarRelease, StoreX[mo_release], MemBarVolatile or
1574   // MemBarAcquire forms part of a volatile put or CAS when using
1575   // these GC configurations (see below) and also complicates the
1576   // decision as to how to translate a MemBarVolatile and StoreCM.
1577   //
  // So, this means that a card mark MemBarVolatile occurring in the
  // post-barrier graph needs to be distinguished from a normal
1580   // trailing MemBarVolatile. Resolving this is straightforward: a
1581   // card mark MemBarVolatile always projects a Mem feed to a StoreCM
1582   // node and that is a unique marker
1583   //
1584   //      MemBarVolatile (card mark)
1585   //       C |    \     . . .
1586   //         |   StoreCM   . . .
1587   //       . . .
1588   //
1589   // Returning to the task of translating the object put and the
1590   // leading/trailing membar nodes: what do the node graphs look like
1591   // for these 2 special cases? and how can we determine the status of
1592   // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
1593   // normal and non-normal cases?
1594   //
1595   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1597   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1598   // intervening StoreLoad barrier (MemBarVolatile).
1599   //
1600   // So, with CMS we may see a node graph for a volatile object store
1601   // which looks like this
1602   //
1603   //   MemBarRelease
1604   //   MemBarCPUOrder_(leading)____________________
1605   //     C |  | M \       \\               M |   C \
1606   //       |  |    \    StoreN/P[mo_release] |  CastP2X
1607   //       |  | Bot \    / oop      \        |
1608   //       |  |    MergeMem          \      / 
1609   //       |  |      /                |    /
1610   //     MemBarVolatile (card mark)   |   /
1611   //     C |  ||    M |               |  /
1612   //       | LoadB    | Bot       oop | / Bot
1613   //       |   |      |              / /
1614   //       | Cmp      |\            / /
1615   //       | /        | \          / /
1616   //       If         |  \        / /
1617   //       | \        |   \      / /
1618   // IfFalse  IfTrue  |    \    / /
1619   //       \     / \  |    |   / /
1620   //        \   / StoreCM  |  / /
1621   //         \ /      \   /  / /
1622   //        Region     Phi  / /
1623   //          | \   Raw |  / /
1624   //          |  . . .  | / /
1625   //          |       MergeMem
1626   //          |           |
1627   //        MemBarVolatile (trailing)
1628   //
1629   // Notice that there are two MergeMem nodes below the leading
1630   // membar. The first MergeMem merges the AliasIdxBot Mem slice from
1631   // the leading membar and the oopptr Mem slice from the Store into
1632   // the card mark membar. The trailing MergeMem merges the
1633   // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
1634   // slice from the StoreCM and an oop slice from the StoreN/P node
1635   // into the trailing membar (n.b. the raw slice proceeds via a Phi
1636   // associated with the If region).
1637   //
1638   // So, in the case of CMS + CondCardMark the volatile object store
1639   // graph still includes a normal volatile store subgraph from the
1640   // leading membar to the trailing membar. However, it also contains
1641   // the same shape memory flow to the card mark membar. The two flows
1642   // can be distinguished by testing whether or not the downstream
1643   // membar is a card mark membar.
1644   //
1645   // The graph for a CAS also varies with CMS + CondCardMark, in
1646   // particular employing a control feed from the CompareAndSwapX node
1647   // through a CmpI and If to the card mark membar and StoreCM which
1648   // updates the associated card. This avoids executing the card mark
1649   // if the CAS fails. However, it can be seen from the diagram below
1650   // that the presence of the barrier does not alter the normal CAS
1651   // memory subgraph where the leading membar feeds a CompareAndSwapX,
1652   // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
1653   // MemBarAcquire pair.
1654   //
1655   //   MemBarRelease
1656   //   MemBarCPUOrder__(leading)_______________________
1657   //   C /  M |                        \\            C \
1658   //  . . .   | Bot                CompareAndSwapN/P   CastP2X
1659   //          |                  C /  M |
1660   //          |                 CmpI    |
1661   //          |                  /      |
1662   //          |               . . .     |
1663   //          |              IfTrue     |
1664   //          |              /          |
1665   //       MemBarVolatile (card mark)   |
1666   //        C |  ||    M |              |
1667   //          | LoadB    | Bot   ______/|
1668   //          |   |      |      /       |
1669   //          | Cmp      |     /      SCMemProj
1670   //          | /        |    /         |
1671   //          If         |   /         /
1672   //          | \        |  /         / Bot
1673   //     IfFalse  IfTrue | /         /
1674   //          |   / \   / / prec    /
1675   //   . . .  |  /  StoreCM        /
1676   //        \ | /      | raw      /
1677   //        Region    . . .      /
1678   //           | \              /
1679   //           |   . . .   \    / Bot
1680   //           |        MergeMem
1681   //           |          /
1682   //         MemBarCPUOrder
1683   //         MemBarAcquire (trailing)
1684   //
1685   // This has a slightly different memory subgraph to the one seen
1686   // previously but the core of it has a similar memory flow to the
1687   // CAS normal subgraph:
1688   //
1689   //   MemBarRelease
1690   //   MemBarCPUOrder____
1691   //         |          \      . . .
1692   //         |       CompareAndSwapX  . . .
1693   //         |       C /  M |
1694   //         |      CmpI    |
1695   //         |       /      |
1696   //         |      . .    /
1697   //     Bot |   IfTrue   /
1698   //         |   /       /
1699   //    MemBarVolatile  /
1700   //         | ...     /
1701   //      StoreCM ... /
1702   //         |       / 
1703   //       . . .  SCMemProj
1704   //      Raw \    / Bot
1705   //        MergeMem
1706   //           |
1707   //   MemBarCPUOrder
1708   //   MemBarAcquire
1709   //
1710   // The G1 graph for a volatile object put is a lot more complicated.
1711   // Nodes inserted on behalf of G1 may comprise: a pre-write graph
1712   // which adds the old value to the SATB queue; the releasing store
1713   // itself; and, finally, a post-write graph which performs a card
1714   // mark.
1715   //
1716   // The pre-write graph may be omitted, but only when the put is
1717   // writing to a newly allocated (young gen) object and then only if
1718   // there is a direct memory chain to the Initialize node for the
1719   // object allocation. This will not happen for a volatile put since
1720   // any memory chain passes through the leading membar.
1721   //
1722   // The pre-write graph includes a series of 3 If tests. The outermost
1723   // If tests whether SATB is enabled (no else case). The next If tests
1724   // whether the old value is non-NULL (no else case). The third tests
1725   // whether the SATB queue index is > 0, if so updating the queue. The
1726   // else case for this third If calls out to the runtime to allocate a
1727   // new queue buffer.
1728   //
1729   // So with G1 the pre-write and releasing store subgraph looks like
1730   // this (the nested Ifs are omitted).
1731   //
1732   //  MemBarRelease (leading)____________
1733   //     C |  ||  M \   M \    M \  M \ . . .
1734   //       | LoadB   \  LoadL  LoadN   \
1735   //       | /        \                 \
1736   //       If         |\                 \
1737   //       | \        | \                 \
1738   //  IfFalse  IfTrue |  \                 \
1739   //       |     |    |   \                 |
1740   //       |     If   |   /\                |
1741   //       |     |          \               |
1742   //       |                 \              |
1743   //       |    . . .         \             |
1744   //       | /       | /       |            |
1745   //      Region  Phi[M]       |            |
1746   //       | \       |         |            |
1747   //       |  \_____ | ___     |            |
1748   //     C | C \     |   C \ M |            |
1749   //       | CastP2X | StoreN/P[mo_release] |
1750   //       |         |         |            |
1751   //     C |       M |       M |          M |
1752   //        \        | Raw     | oop       / Bot
1753   //                  . . .
1754   //          (post write subtree elided)
1755   //                    . . .
1756   //             C \         M /
1757   //         MemBarVolatile (trailing)
1758   //
1759   // Note that the three memory feeds into the post-write tree are an
1760   // AliasRawIdx slice associated with the writes in the pre-write
1761   // tree, an oop type slice from the StoreX specific to the type of
1762   // the volatile field and the AliasBotIdx slice emanating from the
1763   // leading membar.
1764   //
1765   // n.b. the LoadB in this subgraph is not the card read -- it's a
1766   // read of the SATB queue active flag.
1767   //
1768   // The CAS graph is once again a variant of the above with a
1769   // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
1770   // value from the CompareAndSwapX node is fed into the post-write
  // graph along with the AliasIdxRaw feed from the pre-barrier and
  // the AliasIdxBot feeds from the leading membar and the SCMemProj.
1773   //
1774   //  MemBarRelease (leading)____________
1775   //     C |  ||  M \   M \    M \  M \ . . .
1776   //       | LoadB   \  LoadL  LoadN   \
1777   //       | /        \                 \
1778   //       If         |\                 \
1779   //       | \        | \                 \
1780   //  IfFalse  IfTrue |  \                 \
1781   //       |     |    |   \                 \
1782   //       |     If   |    \                 |
1783   //       |     |          \                |
1784   //       |                 \               |
1785   //       |    . . .         \              |
1786   //       | /       | /       \             |
1787   //      Region  Phi[M]        \            |
1788   //       | \       |           \           |
1789   //       |  \_____ |            |          |
1790   //     C | C \     |            |          |
1791   //       | CastP2X |     CompareAndSwapX   |
1792   //       |         |   res |     |         |
1793   //     C |       M |       |  SCMemProj  M |
1794   //        \        | Raw   |     | Bot    / Bot
1795   //                  . . .
1796   //          (post write subtree elided)
1797   //                    . . .
1798   //             C \         M /
1799   //         MemBarVolatile (trailing)
1800   //
1801   // The G1 post-write subtree is also optional, this time when the
1802   // new value being written is either null or can be identified as a
1803   // newly allocated (young gen) object with no intervening control
1804   // flow. The latter cannot happen but the former may, in which case
1805   // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
1807   // trailing membar as per the normal subgraph. So, the only special
1808   // case which arises is when the post-write subgraph is generated.
1809   //
1810   // The kernel of the post-write G1 subgraph is the card mark itself
1811   // which includes a card mark memory barrier (MemBarVolatile), a
1812   // card test (LoadB), and a conditional update (If feeding a
1813   // StoreCM). These nodes are surrounded by a series of nested Ifs
1814   // which try to avoid doing the card mark. The top level If skips if
1815   // the object reference does not cross regions (i.e. it tests if
1816   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1817   // need not be recorded. The next If, which skips on a NULL value,
1818   // may be absent (it is not generated if the type of value is >=
1819   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1820   // checking if card_val != young).  n.b. although this test requires
1821   // a pre-read of the card it can safely be done before the StoreLoad
1822   // barrier. However that does not bypass the need to reread the card
1823   // after the barrier.
1824   //
1825   //                (pre-write subtree elided)
1826   //        . . .                  . . .    . . .  . . .
1827   //        C |               M |    M |    M |
1828   //       Region            Phi[M] StoreN    |
1829   //          |            Raw  |  oop |  Bot |
1830   //         / \_______         |\     |\     |\
1831   //      C / C \      . . .    | \    | \    | \
1832   //       If   CastP2X . . .   |  \   |  \   |  \
1833   //       / \                  |   \  |   \  |   \
1834   //      /   \                 |    \ |    \ |    \
1835   // IfFalse IfTrue             |      |      |     \
1836   //   |       |                 \     |     /       |
1837   //   |       If                 \    | \  /   \    |
1838   //   |      / \                  \   |   /     \   |
1839   //   |     /   \                  \  |  / \     |  |
1840   //   | IfFalse IfTrue           MergeMem   \    |  |
1841   //   |  . . .    / \                 |      \   |  |
1842   //   |          /   \                |       |  |  |
1843   //   |     IfFalse IfTrue            |       |  |  |
1844   //   |      . . .    |               |       |  |  |
1845   //   |               If             /        |  |  |
1846   //   |               / \           /         |  |  |
1847   //   |              /   \         /          |  |  |
1848   //   |         IfFalse IfTrue    /           |  |  |
1849   //   |           . . .   |      /            |  |  |
1850   //   |                    \    /             |  |  |
1851   //   |                     \  /              |  |  |
1852   //   |         MemBarVolatile__(card mark  ) |  |  |
1853   //   |              ||   C |     \           |  |  |
1854   //   |             LoadB   If     |         /   |  |
1855   //   |                    / \ Raw |        /   /  /
1856   //   |                   . . .    |       /   /  /
1857   //   |                        \   |      /   /  /
1858   //   |                        StoreCM   /   /  /
1859   //   |                           |     /   /  /
1860   //   |                            . . .   /  /
1861   //   |                                   /  /
1862   //   |   . . .                          /  /
1863   //   |    |             | /            /  /
1864   //   |    |           Phi[M] /        /  /
1865   //   |    |             |   /        /  /
1866   //   |    |             |  /        /  /
1867   //   |  Region  . . .  Phi[M]      /  /
1868   //   |    |             |         /  /
1869   //    \   |             |        /  /
1870   //     \  | . . .       |       /  /
1871   //      \ |             |      /  /
1872   //      Region         Phi[M] /  /
1873   //        |               \  /  /
1874   //         \             MergeMem
1875   //          \            /
1876   //          MemBarVolatile
1877   //
1878   // As with CMS + CondCardMark the first MergeMem merges the
1879   // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
1880   // slice from the Store into the card mark membar. However, in this
1881   // case it may also merge an AliasRawIdx mem slice from the pre
1882   // barrier write.
1883   //
1884   // The trailing MergeMem merges an AliasIdxBot Mem slice from the
1885   // leading membar with an oop slice from the StoreN and an
1886   // AliasRawIdx slice from the post barrier writes. In this case the
1887   // AliasIdxRaw Mem slice is merged through a series of Phi nodes
1888   // which combine feeds from the If regions in the post barrier
1889   // subgraph.
1890   //
1891   // So, for G1 the same characteristic subgraph arises as for CMS +
1892   // CondCardMark. There is a normal subgraph feeding the card mark
1893   // membar and a normal subgraph feeding the trailing membar.
1894   //
1895   // The CAS graph when using G1GC also includes an optional
1896   // post-write subgraph. It is very similar to the above graph except
1897   // for a few details.
1898   // 
  // - The control flow is gated by an additional If which tests the
1900   // result from the CompareAndSwapX node
1901   // 
1902   //  - The MergeMem which feeds the card mark membar only merges the
1903   // AliasIdxBot slice from the leading membar and the AliasIdxRaw
1904   // slice from the pre-barrier. It does not merge the SCMemProj
1905   // AliasIdxBot slice. So, this subgraph does not look like the
1906   // normal CAS subgraph.
1907   //
1908   // - The MergeMem which feeds the trailing membar merges the
1909   // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
1910   // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
1911   // has two AliasIdxBot input slices. However, this subgraph does
1912   // still look like the normal CAS subgraph.
1913   //
1914   // So, the upshot is:
1915   //
  // In all cases a volatile put graph will include a *normal*
  // volatile store subgraph between the leading membar and the
  // trailing membar. It may also include a normal volatile store
  // subgraph between the leading membar and the card mark membar.
1920   //
1921   // In all cases a CAS graph will contain a unique normal CAS graph
1922   // feeding the trailing membar.
1923   //
1924   // In all cases where there is a card mark membar (either as part of
1925   // a volatile object put or CAS) it will be fed by a MergeMem whose
1926   // AliasIdxBot slice feed will be a leading membar.
1927   //
1928   // The predicates controlling generation of instructions for store
1929   // and barrier nodes employ a few simple helper functions (described
1930   // below) which identify the presence or absence of all these
1931   // subgraph configurations and provide a means of traversing from
1932   // one node in the subgraph to another.
1933 
1934   // is_CAS(int opcode)
1935   //
1936   // return true if opcode is one of the possible CompareAndSwapX
1937   // values otherwise false.
1938 
1939   bool is_CAS(int opcode)
1940   {
1941     return (opcode == Op_CompareAndSwapI ||
1942             opcode == Op_CompareAndSwapL ||
1943             opcode == Op_CompareAndSwapN ||
1944             opcode == Op_CompareAndSwapP);
1945   }
1946 
1947   // leading_to_trailing
1948   //
  // graph traversal helper which detects the normal case Mem feed from
1950   // a release membar (or, optionally, its cpuorder child) to a
1951   // dependent volatile membar i.e. it ensures that one or other of
1952   // the following Mem flow subgraph is present.
1953   //
1954   //   MemBarRelease {leading}
1955   //   {MemBarCPUOrder} {optional}
1956   //     Bot |  \      . . .
1957   //         |  StoreN/P[mo_release]  . . .
1958   //         |   /
1959   //        MergeMem
1960   //         |
1961   //   MemBarVolatile {not card mark}
1962   //
1963   //   MemBarRelease {leading}
1964   //   {MemBarCPUOrder} {optional}
1965   //      |       \      . . .
1966   //      |     CompareAndSwapX  . . .
1967   //               |
1968   //     . . .    SCMemProj
1969   //           \   |
1970   //      |    MergeMem
1971   //      |       /
1972   //    MemBarCPUOrder
1973   //    MemBarAcquire {trailing}
1974   //
  // the predicate needs to be capable of distinguishing the following
  // volatile put graph which may arise when a GC post barrier
  // inserts a card mark membar
1978   //
1979   //   MemBarRelease {leading}
1980   //   {MemBarCPUOrder}__
1981   //     Bot |   \       \
1982   //         |   StoreN/P \
1983   //         |    / \     |
1984   //        MergeMem \    |
1985   //         |        \   |
1986   //   MemBarVolatile  \  |
1987   //    {card mark}     \ |
1988   //                  MergeMem
1989   //                      |
1990   // {not card mark} MemBarVolatile
1991   //
1992   // if the correct configuration is present returns the trailing
1993   // membar otherwise NULL.
1994   //
1995   // the input membar is expected to be either a cpuorder membar or a
1996   // release membar. in the latter case it should not have a cpu membar
1997   // child.
1998   //
1999   // the returned value may be a card mark or trailing membar
2000   //
2001 
  MemBarNode *leading_to_trailing(MemBarNode *leading)
  {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    if (!mem) {
      return NULL;
    }

    Node *x = NULL;
    StoreNode * st = NULL;
    LoadStoreNode *cas = NULL;
    MergeMemNode *mm = NULL;
    MergeMemNode *mm2 = NULL;

    // scan the users of the leading membar's Mem projection. we expect
    // to find at most one releasing store or CAS plus one MergeMem --
    // or two MergeMems when a GC card mark membar is interposed (see
    // the CMS + CondCardMark / G1 diagrams above).
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        if (mm != NULL) {
          if (mm2 != NULL) {
            // should not see more than 2 merge mems
            return NULL;
          } else {
            mm2 = x->as_MergeMem();
          }
        } else {
          mm = x->as_MergeMem();
        }
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas) {
      return NULL;
    }

    // must have at least one merge if we also have st
    if (st && !mm) {
      return NULL;
    }

    if (cas) {
      // CAS case: follow CAS -> SCMemProj -> MergeMem -> CPUOrder ->
      // Acquire and return the trailing acquire membar.
      Node *y = NULL;
      // look for an SCMemProj
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->is_Proj()) {
          y = x;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      // the proj must feed a MergeMem
      for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
        x = y->fast_out(i);
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm == NULL) {
        return NULL;
      }
      MemBarNode *mbar = NULL;
      // ensure the merge feeds a trailing membar cpuorder + acquire pair
      for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
        x = mm->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarCPUOrder) {
            MemBarNode *z =  x->as_MemBar();
            z = child_membar(z);
            if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
              mbar = z;
            }
          }
          break;
        }
      }
      return mbar;
    } else {
      // store case: the store must feed every MergeMem we found, and
      // each MergeMem must feed a MemBarVolatile.
      Node *y = NULL;
      // ensure the store feeds the first mergemem;
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          y = st;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      if (mm2 != NULL) {
        // ensure the store feeds the second mergemem;
        y = NULL;
        for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
          if (st->fast_out(i) == mm2) {
            y = st;
          }
        }
        if (y == NULL) {
          return NULL;
        }
      }

      MemBarNode *mbar = NULL;
      // ensure the first mergemem feeds a volatile membar
      for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
        x = mm->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarVolatile) {
            mbar = x->as_MemBar();
          }
          break;
        }
      }
      if (mm2 == NULL) {
        // this is our only option for a trailing membar
        return mbar;
      }
      // ensure the second mergemem feeds a volatile membar
      MemBarNode *mbar2 = NULL;
      for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
        x = mm2->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarVolatile) {
            mbar2 = x->as_MemBar();
          }
          break;
        }
      }
      // if we have two merge mems we must have two volatile membars
      if (mbar == NULL || mbar2 == NULL) {
        return NULL;
      }
      // return the trailing membar: exactly one of the two volatile
      // membars must be a card mark membar; the other is the trailing
      // membar we want.
      if (is_card_mark_membar(mbar2)) {
        return mbar;
      } else {
        if (is_card_mark_membar(mbar)) {
          return mbar2;
        } else {
          return NULL;
        }
      }
    }
  }
2168 
2169   // trailing_to_leading
2170   //
2171   // graph traversal helper which detects the normal case Mem feed
2172   // from a trailing membar to a preceding release membar (optionally
2173   // its cpuorder child) i.e. it ensures that one or other of the
2174   // following Mem flow subgraphs is present.
2175   //
2176   //   MemBarRelease {leading}
2177   //   MemBarCPUOrder {optional}
2178   //    | Bot |  \      . . .
2179   //    |     |  StoreN/P[mo_release]  . . .
2180   //    |     |   /
2181   //    |    MergeMem
2182   //    |     |
2183   //   MemBarVolatile {not card mark}
2184   //
2185   //   MemBarRelease {leading}
2186   //   MemBarCPUOrder {optional}
2187   //      |       \      . . .
2188   //      |     CompareAndSwapX  . . .
2189   //               |
2190   //     . . .    SCMemProj
2191   //           \   |
2192   //      |    MergeMem
2193   //      |       |
2194   //    MemBarCPUOrder
2195   //    MemBarAcquire {trailing}
2196   //
2197   // this predicate checks for the same flow as the previous predicate
2198   // but starting from the bottom rather than the top.
2199   //
  // if the configuration is present returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
2202   //
2203   // n.b. the input membar is expected to be a MemBarVolatile or
2204   // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
2205   // mark membar.
2206 
2207   MemBarNode *trailing_to_leading(const MemBarNode *barrier)
2208   {
2209     // input must be a volatile membar
2210     assert((barrier->Opcode() == Op_MemBarVolatile ||
2211             barrier->Opcode() == Op_MemBarAcquire),
2212            "expecting a volatile or an acquire membar");
2213 
2214     assert((barrier->Opcode() != Op_MemBarVolatile) ||
2215            !is_card_mark_membar(barrier),
2216            "not expecting a card mark membar");
2217     Node *x;
2218     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2219 
2220     // if we have an acquire membar then it must be fed via a CPUOrder
2221     // membar
2222 
2223     if (is_cas) {
2224       // skip to parent barrier which must be a cpuorder
2225       x = parent_membar(barrier);
2226       if (x->Opcode() != Op_MemBarCPUOrder)
2227         return NULL;
2228     } else {
2229       // start from the supplied barrier
2230       x = (Node *)barrier;
2231     }
2232 
2233     // the Mem feed to the membar should be a merge
2234     x = x ->in(TypeFunc::Memory);
2235     if (!x->is_MergeMem())
2236       return NULL;
2237 
2238     MergeMemNode *mm = x->as_MergeMem();
2239 
2240     if (is_cas) {
2241       // the merge should be fed from the CAS via an SCMemProj node
2242       x = NULL;
2243       for (uint idx = 1; idx < mm->req(); idx++) {
2244         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2245           x = mm->in(idx);
2246           break;
2247         }
2248       }
2249       if (x == NULL) {
2250         return NULL;
2251       }
2252       // check for a CAS feeding this proj
2253       x = x->in(0);
2254       int opcode = x->Opcode();
2255       if (!is_CAS(opcode)) {
2256         return NULL;
2257       }
2258       // the CAS should get its mem feed from the leading membar
2259       x = x->in(MemNode::Memory);
2260     } else {
2261       // the merge should get its Bottom mem feed from the leading membar
2262       x = mm->in(Compile::AliasIdxBot);
2263     }
2264 
2265     // ensure this is a non control projection
2266     if (!x->is_Proj() || x->is_CFG()) {
2267       return NULL;
2268     }
2269     // if it is fed by a membar that's the one we want
2270     x = x->in(0);
2271 
2272     if (!x->is_MemBar()) {
2273       return NULL;
2274     }
2275 
2276     MemBarNode *leading = x->as_MemBar();
2277     // reject invalid candidates
2278     if (!leading_membar(leading)) {
2279       return NULL;
2280     }
2281 
2282     // ok, we have a leading membar, now for the sanity clauses
2283 
2284     // the leading membar must feed Mem to a releasing store or CAS
2285     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2286     StoreNode *st = NULL;
2287     LoadStoreNode *cas = NULL;
2288     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2289       x = mem->fast_out(i);
2290       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2291         // two stores or CASes is one too many
2292         if (st != NULL || cas != NULL) {
2293           return NULL;
2294         }
2295         st = x->as_Store();
2296       } else if (is_CAS(x->Opcode())) {
2297         if (st != NULL || cas != NULL) {
2298           return NULL;
2299         }
2300         cas = x->as_LoadStore();
2301       }
2302     }
2303 
2304     // we should not have both a store and a cas
2305     if (st == NULL & cas == NULL) {
2306       return NULL;
2307     }
2308 
2309     if (st == NULL) {
2310       // nothing more to check
2311       return leading;
2312     } else {
2313       // we should not have a store if we started from an acquire
2314       if (is_cas) {
2315         return NULL;
2316       }
2317 
2318       // the store should feed the merge we used to get here
2319       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2320         if (st->fast_out(i) == mm) {
2321           return leading;
2322         }
2323       }
2324     }
2325 
2326     return NULL;
2327   }
2328 
2329   // card_mark_to_leading
2330   //
2331   // graph traversal helper which traverses from a card mark volatile
2332   // membar to a leading membar i.e. it ensures that the following Mem
2333   // flow subgraph is present.
2334   //
2335   //    MemBarRelease {leading}
2336   //   {MemBarCPUOrder} {optional}
2337   //         |   . . .
2338   //     Bot |   /
2339   //      MergeMem
2340   //         |
2341   //     MemBarVolatile (card mark)
2342   //        |     \
2343   //      . . .   StoreCM
2344   //
  // if the configuration is present returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
2347   //
  // n.b. the input membar is expected to be a MemBarVolatile and must
  // be a card mark membar.
2350 
2351   MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
2352   {
2353     // input must be a card mark volatile membar
2354     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2355 
2356     // the Mem feed to the membar should be a merge
2357     Node *x = barrier->in(TypeFunc::Memory);
2358     if (!x->is_MergeMem()) {
2359       return NULL;
2360     }
2361 
2362     MergeMemNode *mm = x->as_MergeMem();
2363 
2364     x = mm->in(Compile::AliasIdxBot);
2365 
2366     if (!x->is_MemBar()) {
2367       return NULL;
2368     }
2369 
2370     MemBarNode *leading = x->as_MemBar();
2371 
2372     if (leading_membar(leading)) {
2373       return leading;
2374     }
2375 
2376     return NULL;
2377   }
2378 
// predicate controlling whether a MemBarAcquire can be elided: returns
// true when the membar belongs to one of the three recognised acquire
// patterns (bytecode volatile read, inlined unsafe volatile get, or a
// CAS trailing membar) where the matched instruction already supplies
// the acquire semantics.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    // the same load must also consume the parent's Mem projection
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS
  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2486 
2487 bool needs_acquiring_load(const Node *n)
2488 {
2489   assert(n->is_Load(), "expecting a load");
2490   if (UseBarriersForVolatile) {
2491     // we use a normal load and a dmb
2492     return false;
2493   }
2494 
2495   LoadNode *ld = n->as_Load();
2496 
2497   if (!ld->is_acquire()) {
2498     return false;
2499   }
2500 
2501   // check if this load is feeding an acquire membar
2502   //
2503   //   LoadX[mo_acquire]
2504   //   {  |1   }
2505   //   {DecodeN}
2506   //      |Parms
2507   //   MemBarAcquire*
2508   //
2509   // where * tags node we were passed
2510   // and |k means input k
2511 
2512   Node *start = ld;
2513   Node *mbacq = NULL;
2514 
2515   // if we hit a DecodeNarrowPtr we reset the start node and restart
2516   // the search through the outputs
2517  restart:
2518 
2519   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2520     Node *x = start->fast_out(i);
2521     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2522       mbacq = x;
2523     } else if (!mbacq &&
2524                (x->is_DecodeNarrowPtr() ||
2525                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2526       start = x;
2527       goto restart;
2528     }
2529   }
2530 
2531   if (mbacq) {
2532     return true;
2533   }
2534 
2535   // now check for an unsafe volatile get
2536 
2537   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2538   //
2539   //     MemBarCPUOrder
2540   //        ||       \\
2541   //   MemBarAcquire* LoadX[mo_acquire]
2542   //        ||
2543   //   MemBarCPUOrder
2544 
2545   MemBarNode *membar;
2546 
2547   membar = parent_membar(ld);
2548 
2549   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2550     return false;
2551   }
2552 
2553   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2554 
2555   membar = child_membar(membar);
2556 
2557   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2558     return false;
2559   }
2560 
2561   membar = child_membar(membar);
2562 
2563   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2564     return false;
2565   }
2566 
2567   return true;
2568 }
2569 
2570 bool unnecessary_release(const Node *n)
2571 {
2572   assert((n->is_MemBar() &&
2573           n->Opcode() == Op_MemBarRelease),
2574          "expecting a release membar");
2575 
2576   if (UseBarriersForVolatile) {
2577     // we need to plant a dmb
2578     return false;
2579   }
2580 
2581   // if there is a dependent CPUOrder barrier then use that as the
2582   // leading
2583 
2584   MemBarNode *barrier = n->as_MemBar();
2585   // check for an intervening cpuorder membar
2586   MemBarNode *b = child_membar(barrier);
2587   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2588     // ok, so start the check from the dependent cpuorder barrier
2589     barrier = b;
2590   }
2591 
2592   // must start with a normal feed
2593   MemBarNode *trailing = leading_to_trailing(barrier);
2594 
2595   return (trailing != NULL);
2596 }
2597 
2598 bool unnecessary_volatile(const Node *n)
2599 {
2600   // assert n->is_MemBar();
2601   if (UseBarriersForVolatile) {
2602     // we need to plant a dmb
2603     return false;
2604   }
2605 
2606   MemBarNode *mbvol = n->as_MemBar();
2607 
2608   // first we check if this is part of a card mark. if so then we have
2609   // to generate a StoreLoad barrier
2610 
2611   if (is_card_mark_membar(mbvol)) {
2612       return false;
2613   }
2614 
2615   // ok, if it's not a card mark then we still need to check if it is
2616   // a trailing membar of a volatile put graph.
2617 
2618   return (trailing_to_leading(mbvol) != NULL);
2619 }
2620 
2621 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2622 
2623 bool needs_releasing_store(const Node *n)
2624 {
2625   // assert n->is_Store();
2626   if (UseBarriersForVolatile) {
2627     // we use a normal store and dmb combination
2628     return false;
2629   }
2630 
2631   StoreNode *st = n->as_Store();
2632 
2633   // the store must be marked as releasing
2634   if (!st->is_release()) {
2635     return false;
2636   }
2637 
2638   // the store must be fed by a membar
2639 
2640   Node *x = st->lookup(StoreNode::Memory);
2641 
2642   if (! x || !x->is_Proj()) {
2643     return false;
2644   }
2645 
2646   ProjNode *proj = x->as_Proj();
2647 
2648   x = proj->lookup(0);
2649 
2650   if (!x || !x->is_MemBar()) {
2651     return false;
2652   }
2653 
2654   MemBarNode *barrier = x->as_MemBar();
2655 
2656   // if the barrier is a release membar or a cpuorder mmebar fed by a
2657   // release membar then we need to check whether that forms part of a
2658   // volatile put graph.
2659 
2660   // reject invalid candidates
2661   if (!leading_membar(barrier)) {
2662     return false;
2663   }
2664 
2665   // does this lead a normal subgraph?
2666   MemBarNode *trailing = leading_to_trailing(barrier);
2667 
2668   return (trailing != NULL);
2669 }
2670 
2671 // predicate controlling translation of CAS
2672 //
2673 // returns true if CAS needs to use an acquiring load otherwise false
2674 
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit dmb barriers are planted instead, so no acquiring
    // load is needed
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  // in debug builds verify the expected graph shape: the CAS must be
  // fed through a memory Proj by a cpuorder membar which is in turn
  // fed by a release membar, and that leading membar must pair with a
  // trailing acquire membar
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_trailing(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2719 
2720 // predicate controlling translation of StoreCM
2721 //
2722 // returns true if a StoreStore must precede the card write otherwise
2723 // false
2724 
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking. Any other occurrence will happen when
  // performing a card mark using CMS with conditional card marking or
  // G1. In those cases the preceding MemBarVolatile will be
  // translated to a dmb ish which guarantees visibility of the
  // preceding StoreN/P before this StoreCM

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then we must
  // insert the dmb ishst

  if (UseBarriersForVolatile) {
    return false;
  }

  // we must be using CMS without conditional card marking so we have
  // to generate the StoreStore

  return false;
}
2753 
2754 
2755 #define __ _masm.
2756 
2757 // advance declarations for helper functions to convert register
2758 // indices to register objects
2759 
2760 // the ad file has to provide implementations of certain methods
2761 // expected by the generic code
2762 //
2763 // REQUIRED FUNCTIONALITY
2764 
2765 //=============================================================================
2766 
2767 // !!!!! Special hack to get all types of calls to specify the byte offset
2768 //       from the start of the call to the point where the return address
2769 //       will point.
2770 
2771 int MachCallStaticJavaNode::ret_addr_offset()
2772 {
2773   // call should be a simple bl
2774   int off = 4;
2775   return off;
2776 }
2777 
2778 int MachCallDynamicJavaNode::ret_addr_offset()
2779 {
2780   return 16; // movz, movk, movk, bl
2781 }
2782 
2783 int MachCallRuntimeNode::ret_addr_offset() {
2784   // for generated stubs the call will be
2785   //   far_call(addr)
2786   // for real runtime callouts it will be six instructions
2787   // see aarch64_enc_java_to_runtime
2788   //   adr(rscratch2, retaddr)
2789   //   lea(rscratch1, RuntimeAddress(addr)
2790   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2791   //   blrt rscratch1
2792   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2793   if (cb) {
2794     return MacroAssembler::far_branch_size();
2795   } else {
2796     return 6 * NativeInstruction::instruction_size;
2797   }
2798 }
2799 
2800 // Indicate if the safepoint node needs the polling page as an input
2801 
2802 // the shared code plants the oop data at the start of the generated
2803 // code for the safepoint node and that needs ot be at the load
2804 // instruction itself. so we cannot plant a mov of the safepoint poll
2805 // address followed by a load. setting this to true means the mov is
2806 // scheduled as a prior instruction. that's better for scheduling
2807 // anyway.
2808 
2809 bool SafePointNode::needs_polling_address_input()
2810 {
2811   return true;
2812 }
2813 
2814 //=============================================================================
2815 
2816 #ifndef PRODUCT
// debug-only textual rendering of the breakpoint node
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
2820 #endif
2821 
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // emit a brk instruction with immediate 0
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
2826 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // defer to the generic size computation
  return MachNode::size(ra_);
}
2830 
2831 //=============================================================================
2832 
2833 #ifndef PRODUCT
  // debug-only: print the nop padding with its byte count
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
2837 #endif
2838 
2839   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2840     MacroAssembler _masm(&cbuf);
2841     for (int i = 0; i < _count; i++) {
2842       __ nop();
2843     }
2844   }
2845 
  uint MachNopNode::size(PhaseRegAlloc*) const {
    // each nop occupies exactly one machine instruction
    return _count * NativeInstruction::instruction_size;
  }
2849 
2850 //=============================================================================
2851 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2852 
int Compile::ConstantTable::calculate_table_base_offset() const {
  // the constant table is addressed absolutely, so no base offset
  return 0;  // absolute addressing, no offset
}
2856 
// no post-register-allocation expansion is needed for the base node
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called because requires_postalloc_expand() returns false
  ShouldNotReachHere();
}
2861 
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding - no instructions are emitted for this node
}
2865 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // nothing is emitted (see emit above) so the size is zero
  return 0;
}
2869 
2870 #ifndef PRODUCT
// debug-only textual rendering of the (empty) constant base node
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
2874 #endif
2875 
2876 #ifndef PRODUCT
// debug-only: print a textual approximation of the prolog code
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames can be allocated with a single immediate sub;
  // larger frames need the size materialized into rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
2896 #endif
2897 
// emit the method prolog: patchable nop, optional stack bang, frame
// build, simulator notification and constant table base setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack before building the frame if required
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // simulator builds are notified of method entry
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2933 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
2939 
int MachPrologNode::reloc() const
{
  // the prolog contains no relocatable values
  return 0;
}
2944 
2945 //=============================================================================
2946 
2947 #ifndef PRODUCT
// debug-only: print a textual approximation of the epilog code
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // small frames restore with an immediate add; larger frames need
  // the size materialized into rscratch1
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
2971 #endif
2972 
// emit the method epilog: frame teardown, simulator notification and
// return polling page touch
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // simulator builds are notified of method reentry
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
2988 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
2993 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
2998 
const Pipeline * MachEpilogNode::pipeline() const {
  // use the generic pipeline class
  return MachNode::pipeline_class();
}
3002 
3003 // This method seems to be obsolete. It is declared in machnode.hpp
3004 // and defined in all *.ad files, but it is never called. Should we
3005 // get rid of it?
int MachEpilogNode::safepoint_offset() const {
  // offset of the poll instruction within the epilog
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3010 
3011 //=============================================================================
3012 
3013 // Figure out which register class each belongs in: rc_int, rc_float or
3014 // rc_stack.
3015 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3016 
3017 static enum RC rc_class(OptoReg::Name reg) {
3018 
3019   if (reg == OptoReg::Bad) {
3020     return rc_bad;
3021   }
3022 
3023   // we have 30 int registers * 2 halves
3024   // (rscratch1 and rscratch2 are omitted)
3025 
3026   if (reg < 60) {
3027     return rc_int;
3028   }
3029 
3030   // we have 32 float register * 2 halves
3031   if (reg < 60 + 128) {
3032     return rc_float;
3033   }
3034 
3035   // Between float regs & stack is the flags regs.
3036   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3037 
3038   return rc_stack;
3039 }
3040 
3041 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3042   Compile* C = ra_->C;
3043 
3044   // Get registers to move.
3045   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3046   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3047   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3048   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3049 
3050   enum RC src_hi_rc = rc_class(src_hi);
3051   enum RC src_lo_rc = rc_class(src_lo);
3052   enum RC dst_hi_rc = rc_class(dst_hi);
3053   enum RC dst_lo_rc = rc_class(dst_lo);
3054 
3055   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3056 
3057   if (src_hi != OptoReg::Bad) {
3058     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3059            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3060            "expected aligned-adjacent pairs");
3061   }
3062 
3063   if (src_lo == dst_lo && src_hi == dst_hi) {
3064     return 0;            // Self copy, no move.
3065   }
3066 
3067   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3068               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3069   int src_offset = ra_->reg2offset(src_lo);
3070   int dst_offset = ra_->reg2offset(dst_lo);
3071 
3072   if (bottom_type()->isa_vect() != NULL) {
3073     uint ireg = ideal_reg();
3074     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3075     if (cbuf) {
3076       MacroAssembler _masm(cbuf);
3077       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3078       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3079         // stack->stack
3080         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3081         if (ireg == Op_VecD) {
3082           __ unspill(rscratch1, true, src_offset);
3083           __ spill(rscratch1, true, dst_offset);
3084         } else {
3085           __ spill_copy128(src_offset, dst_offset);
3086         }
3087       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3088         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3089                ireg == Op_VecD ? __ T8B : __ T16B,
3090                as_FloatRegister(Matcher::_regEncode[src_lo]));
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3092         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3093                        ireg == Op_VecD ? __ D : __ Q,
3094                        ra_->reg2offset(dst_lo));
3095       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3096         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3097                        ireg == Op_VecD ? __ D : __ Q,
3098                        ra_->reg2offset(src_lo));
3099       } else {
3100         ShouldNotReachHere();
3101       }
3102     }
3103   } else if (cbuf) {
3104     MacroAssembler _masm(cbuf);
3105     switch (src_lo_rc) {
3106     case rc_int:
3107       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3108         if (is64) {
3109             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3110                    as_Register(Matcher::_regEncode[src_lo]));
3111         } else {
3112             MacroAssembler _masm(cbuf);
3113             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3114                     as_Register(Matcher::_regEncode[src_lo]));
3115         }
3116       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3117         if (is64) {
3118             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3119                      as_Register(Matcher::_regEncode[src_lo]));
3120         } else {
3121             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3122                      as_Register(Matcher::_regEncode[src_lo]));
3123         }
3124       } else {                    // gpr --> stack spill
3125         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3126         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3127       }
3128       break;
3129     case rc_float:
3130       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3131         if (is64) {
3132             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3133                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3134         } else {
3135             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3136                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3137         }
3138       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3139           if (cbuf) {
3140             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3141                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3142         } else {
3143             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3144                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3145         }
3146       } else {                    // fpr --> stack spill
3147         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3148         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3149                  is64 ? __ D : __ S, dst_offset);
3150       }
3151       break;
3152     case rc_stack:
3153       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3154         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3155       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3156         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3157                    is64 ? __ D : __ S, src_offset);
3158       } else {                    // stack --> stack copy
3159         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3160         __ unspill(rscratch1, is64, src_offset);
3161         __ spill(rscratch1, is64, dst_offset);
3162       }
3163       break;
3164     default:
3165       assert(false, "bad rc_class for spill");
3166       ShouldNotReachHere();
3167     }
3168   }
3169 
3170   if (st) {
3171     st->print("spill ");
3172     if (src_lo_rc == rc_stack) {
3173       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3174     } else {
3175       st->print("%s -> ", Matcher::regName[src_lo]);
3176     }
3177     if (dst_lo_rc == rc_stack) {
3178       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3179     } else {
3180       st->print("%s", Matcher::regName[dst_lo]);
3181     }
3182     if (bottom_type()->isa_vect() != NULL) {
3183       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3184     } else {
3185       st->print("\t# spill size = %d", is64 ? 64:32);
3186     }
3187   }
3188 
3189   return 0;
3190 
3191 }
3192 
3193 #ifndef PRODUCT
3194 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3195   if (!ra_)
3196     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3197   else
3198     implementation(NULL, ra_, false, st);
3199 }
3200 #endif
3201 
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // emit only, no formatting stream
  implementation(&cbuf, ra_, false, NULL);
}
3205 
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // variable sized; defer to the generic computation
  return MachNode::size(ra_);
}
3209 
3210 //=============================================================================
3211 
3212 #ifndef PRODUCT
3213 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3214   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3215   int reg = ra_->get_reg_first(this);
3216   st->print("add %s, rsp, #%d]\t# box lock",
3217             Matcher::regName[reg], offset);
3218 }
3219 #endif
3220 
3221 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3222   MacroAssembler _masm(&cbuf);
3223 
3224   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3225   int reg    = ra_->get_encode(this);
3226 
3227   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3228     __ add(as_Register(reg), sp, offset);
3229   } else {
3230     ShouldNotReachHere();
3231   }
3232 }
3233 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit() produces a single 4 byte add instruction
  return 4;
}
3238 
3239 //=============================================================================
3240 
3241 #ifndef PRODUCT
3242 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3243 {
3244   st->print_cr("# MachUEPNode");
3245   if (UseCompressedClassPointers) {
3246     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3247     if (Universe::narrow_klass_shift() != 0) {
3248       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3249     }
3250   } else {
3251    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3252   }
3253   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3254   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3255 }
3256 #endif
3257 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // check the receiver klass; on mismatch fall through to the far
  // jump to the inline cache miss stub
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3271 
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // variable sized; defer to the generic computation
  return MachNode::size(ra_);
}
3276 
3277 // REQUIRED EMIT CODE
3278 
3279 //=============================================================================
3280 
3281 // Emit exception handler code.
// Emit exception handler code: a far jump to the exception blob.
// Returns the handler's offset within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3300 
3301 // Emit deopt handler code.
// Emit deopt handler code: capture the return address in lr then far
// jump to the deopt blob's unpack entry. Returns the handler's offset
// within the stub section, or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr points here so the unpacker can identify the deopt site
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3321 
3322 // REQUIRED MATCHER CODE
3323 
3324 //=============================================================================
3325 
3326 const bool Matcher::match_rule_supported(int opcode) {
3327 
3328   // TODO
3329   // identify extra cases that we might want to provide match rules for
3330   // e.g. Op_StrEquals and other intrinsics
3331   if (!has_match_rule(opcode)) {
3332     return false;
3333   }
3334 
3335   return true;  // Per default match rules are supported.
3336 }
3337 
3338 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3339 
3340   // TODO
3341   // identify extra cases that we might want to provide match rules for
3342   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3343   bool ret_value = match_rule_supported(opcode);
3344   // Add rules here.
3345 
3346   return ret_value;  // Per default match rules are supported.
3347 }
3348 
const int Matcher::float_pressure(int default_pressure_threshold) {
  // use the generic threshold unmodified
  return default_pressure_threshold;
}
3352 
int Matcher::regnum_to_fpu_offset(int regnum)
{
  // not used on aarch64
  Unimplemented();
  return 0;
}
3358 
3359 // Is this branch offset short enough that a short branch can be used?
3360 //
3361 // NOTE: If the platform does not provide any short branch variants, then
3362 //       this method should return false for offset 0.
3363 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3364   // The passed offset is relative to address of the branch.
3365 
3366   return (-32768 <= offset && offset < 32768);
3367 }
3368 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}
3374 
3375 // true just means we have fast l2f conversion
// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3379 
3380 // Vector width in bytes.
3381 const int Matcher::vector_width_in_bytes(BasicType bt) {
3382   int size = MIN2(16,(int)MaxVectorSize);
3383   // Minimum 2 values in vector
3384   if (size < 2*type2aelembytes(bt)) size = 0;
3385   // But never < 4
3386   if (size < 4) size = 0;
3387   return size;
3388 }
3389 
3390 // Limits on vector size (number of elements) loaded into vector.
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3394 const int Matcher::min_vector_size(const BasicType bt) {
3395 //  For the moment limit the vector size to 8 bytes
3396     int size = 8 / type2aelembytes(bt);
3397     if (size < 2) size = 2;
3398     return size;
3399 }
3400 
3401 // Vector ideal reg.
3402 const int Matcher::vector_ideal_reg(int len) {
3403   switch(len) {
3404     case  8: return Op_VecD;
3405     case 16: return Op_VecX;
3406   }
3407   ShouldNotReachHere();
3408   return 0;
3409 }
3410 
const int Matcher::vector_shift_count_ideal_reg(int size) {
  // shift counts always live in a full 128 bit vector register
  return Op_VecX;
}
3414 
3415 // AES support not yet implemented
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
3419 
// Misaligned vector store/load is permitted unless AlignVector is set
// (the original comment referred to x86; this is the aarch64 port).
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3424 
// false => size gets scaled to BytesPerLong, ok.
// (the ClearArray count operand is in longwords, not bytes)
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray, in bytes (18 longwords).
// NOTE(review): presumably the cutoff below which ClearArray is
// expanded inline — confirm against the ClearArray rules.
const int Matcher::init_array_short_size = 18 * BytesPerLong;
3430 
// Use conditional move (CMOVL)
// Extra cost (beyond an int cmove) of a long conditional move.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

// Extra cost (beyond an int cmove) of a float conditional move.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
3441 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// (false: the hardware shift semantics make explicit masking unnecessary)
const bool Matcher::need_masked_shift_count = false;
3453 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only profitable when decoding is a plain add (shift == 0).
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3473 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
3486 
// Not used on this port: any call into it is flagged by
// Unimplemented().  (The old "No-op on amd64" comment was stale,
// inherited from the x86 file.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3491 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3505 
3506 // Return whether or not this register is ever used as an argument.
3507 // This function is used on startup to build the trampoline stubs in
3508 // generateOptoStub.  Registers not mentioned will be killed by the VM
3509 // call in the trampoline, and arguments in those registers not be
3510 // available to the callee.
3511 bool Matcher::can_be_java_arg(int reg)
3512 {
3513   return
3514     reg ==  R0_num || reg == R0_H_num ||
3515     reg ==  R1_num || reg == R1_H_num ||
3516     reg ==  R2_num || reg == R2_H_num ||
3517     reg ==  R3_num || reg == R3_H_num ||
3518     reg ==  R4_num || reg == R4_H_num ||
3519     reg ==  R5_num || reg == R5_H_num ||
3520     reg ==  R6_num || reg == R6_H_num ||
3521     reg ==  R7_num || reg == R7_H_num ||
3522     reg ==  V0_num || reg == V0_H_num ||
3523     reg ==  V1_num || reg == V1_H_num ||
3524     reg ==  V2_num || reg == V2_H_num ||
3525     reg ==  V3_num || reg == V3_H_num ||
3526     reg ==  V4_num || reg == V4_H_num ||
3527     reg ==  V5_num || reg == V5_H_num ||
3528     reg ==  V6_num || reg == V6_H_num ||
3529     reg ==  V7_num || reg == V7_H_num;
3530 }
3531 
// A register is spillable as an argument iff it can carry a Java
// argument in the first place.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Never emit a special assembler sequence for long division by a
// constant; the generic transformation is used instead.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3540 
// Register for DIVI projection of divmodI.
// Never requested on this port (no fused divmod node), hence
// ShouldNotReachHere in all four projections below.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is preserved in FP across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3567 
3568 // helper for encoding java_to_runtime calls on sim
3569 //
3570 // this is needed to compute the extra arguments required when
3571 // planting a call to the simulator blrt instruction. the TypeFunc
3572 // can be queried to identify the counts for integral, and floating
3573 // arguments and the return type
3574 
3575 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3576 {
3577   int gps = 0;
3578   int fps = 0;
3579   const TypeTuple *domain = tf->domain();
3580   int max = domain->cnt();
3581   for (int i = TypeFunc::Parms; i < max; i++) {
3582     const Type *t = domain->field_at(i);
3583     switch(t->basic_type()) {
3584     case T_FLOAT:
3585     case T_DOUBLE:
3586       fps++;
3587     default:
3588       gps++;
3589     }
3590   }
3591   gpcnt = gps;
3592   fpcnt = fps;
3593   BasicType rt = tf->return_type();
3594   switch (rt) {
3595   case T_VOID:
3596     rtype = MacroAssembler::ret_type_void;
3597     break;
3598   default:
3599     rtype = MacroAssembler::ret_type_integral;
3600     break;
3601   case T_FLOAT:
3602     rtype = MacroAssembler::ret_type_float;
3603     break;
3604   case T_DOUBLE:
3605     rtype = MacroAssembler::ret_type_double;
3606     break;
3607   }
3608 }
3609 
// Emit a volatile (acquire/release) load or store.  Volatile accesses
// only support a bare base-register addressing mode, so any index,
// displacement or scale in the memory operand is rejected with a
// guarantee().  Note: this macro declares a local `_masm` so that
// code following it in an enc_class can keep using the `__` shortcut.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Pointer-to-member-function types for the three flavors of memory
// access instruction taken by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3623 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Base + displacement only.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        // Base + (extended/scaled) index.
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Base + index + displacement cannot be expressed in a single
        // addressing mode: fold base+disp into rscratch1 first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3660 
  // Float/double variant of the non-volatile memory access helper.
  // NOTE(review): unlike the integer variant above, this switch omits
  // the INDINDEXOFFSETI2L/I2LN cases — presumably those patterns never
  // reach FP loads/stores; confirm against the matcher rules.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Fold base+disp into rscratch1; no single mode covers all three.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3689 
  // Vector (SIMD) variant: only base+disp or base+scaled-index modes
  // are supported; combining index and displacement is asserted out.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3701 
3702 %}
3703 
3704 
3705 
3706 //----------ENCODING BLOCK-----------------------------------------------------
3707 // This block specifies the encoding classes used by the compiler to
3708 // output byte streams.  Encoding classes are parameterized macros
3709 // used by Machine Instruction Nodes in order to generate the bit
3710 // encoding of the instruction.  Operands specify their base encoding
3711 // interface with the interface keyword.  There are currently
3712 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3713 // COND_INTER.  REG_INTER causes an operand to generate a function
3714 // which returns its register number when queried.  CONST_INTER causes
3715 // an operand to generate a function which returns the value of the
3716 // constant when queried.  MEMORY_INTER causes an operand to generate
3717 // four functions which return the Base Register, the Index Register,
3718 // the Scale Value, and the Offset Value of the operand when queried.
3719 // COND_INTER causes an operand to generate six functions which return
3720 // the encoding code (ie - encoding bits for the instruction)
3721 // associated with each basic boolean condition for a conditional
3722 // instruction.
3723 //
3724 // Instructions specify two basic values for encoding.  Again, a
3725 // function is available to check if the constant displacement is an
3726 // oop. They use the ins_encode keyword to specify their encoding
3727 // classes (which must be a sequence of enc_class names, and their
3728 // parameters, specified in the encoding block), and they use the
3729 // opcode keyword to specify, in order, their primary, secondary, and
3730 // tertiary opcode.  Only the opcode sections which a particular
3731 // instruction needs for encoding need to be specified.
3732 encode %{
3733   // Build emit functions for each basic byte or larger field in the
3734   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3735   // from C++ code in the enc_class source block.  Emit functions will
3736   // live in the main source block for now.  In future, we can
3737   // generalize this by adding a syntax that specifies the sizes of
3738   // fields in an order, so that the adlc can build the emit functions
3739   // automagically
3740 
  // catch all for unimplemented encodings: flags the missing
  // encoding at runtime via MacroAssembler::unimplemented instead of
  // silently emitting nothing
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3746 
3747   // BEGIN Non-volatile memory access
3748 
  // Non-volatile scalar loads.  Each encoding forwards to loadStore()
  // with the matching MacroAssembler load instruction; the memory
  // operand's opcode tells loadStore() whether the index must be
  // sign extended (see loadStore() above).

  // Load signed byte into a 32-bit register.
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load signed byte into a 64-bit register.
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load zero-extended byte (32-bit and 64-bit destinations).
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load signed halfword into a 32-bit register.
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load signed halfword into a 64-bit register.
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load zero-extended halfword (32-bit and 64-bit destinations).
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word (zero-extended when the destination is 64-bit).
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32-bit word, sign-extended to 64 bits.
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64-bit doubleword.
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load single-precision float.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load double-precision float.
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3832 
  // Non-volatile SIMD loads: 32-bit (S), 64-bit (D) and 128-bit (Q)
  // vector register variants via the mem_vector_insn loadStore().
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3850 
  // Non-volatile scalar stores.  The *0 variants store the zero
  // register (zr) directly, avoiding the need to materialize 0.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero-byte store preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not encodable as the source of a str), so copy it
    // through rscratch2 first
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3925 
  // Non-volatile SIMD stores: S/D/Q vector register variants.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3943 
3944   // END Non-volatile memory access
3945 
3946   // volatile loads and stores
3947 
  // Store-release encodings (stlrb/stlrh/stlrw) for volatile stores.
  // MOV_VOLATILE enforces that only a bare base register is used.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
3962 
3963 
  // Load-acquire encodings for volatile loads.  ldar only has
  // zero-extending byte/halfword/word forms, so the signed variants
  // follow the acquire load with an explicit sign extension.
  // (MOV_VOLATILE declares _masm, which the trailing `__` lines use.)

  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // FP acquire loads: there is no FP form of ldar, so load into a GP
  // scratch register with ldarw/ldar and fmov the bits across.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4038 
  // 64-bit store-release.  Like aarch64_enc_str, sp cannot be the
  // source of a store, so it is copied through rscratch2 first.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4052 
  // FP store-release: no FP form of stlr exists, so fmov the FP value
  // into rscratch2 and use the GP store-release.  The inner scope keeps
  // this _masm from clashing with the one MOV_VOLATILE declares.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4072 
4073   // synchronized read/update encodings
4074 
  // Load-acquire-exclusive.  ldaxr takes only a bare base register, so
  // any index/displacement is first folded into rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp first, then add the scaled index.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4103 
  // Store-release-exclusive.  Mirrors the ldaxr encoding above; the
  // store status lands in rscratch1 (0 on success), and the final cmpw
  // against zr sets the condition flags so EQ means the store took.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4133 
  // 64-bit compare-and-swap via MacroAssembler::cmpxchg with a plain
  // (non-acquire) exclusive load.  The memory operand must be a bare
  // base register — index/displacement forms are rejected.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}

  // 32-bit variant of the above.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}
4147 
4148 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}

  // 32-bit acquiring variant.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}
4166 
4167 
  // auxiliary used for CompareAndSwapX to set result register:
  // materializes the EQ flag (left by the cmpxchg sequence) as 0/1.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
4174 
4175   // prefetch encodings
4176 
4177   enc_class aarch64_enc_prefetchw(memory mem) %{
4178     MacroAssembler _masm(&cbuf);
4179     Register base = as_Register($mem$$base);
4180     int index = $mem$$index;
4181     int scale = $mem$$scale;
4182     int disp = $mem$$disp;
4183     if (index == -1) {
4184       __ prfm(Address(base, disp), PSTL1KEEP);
4185     } else {
4186       Register index_reg = as_Register(index);
4187       if (disp == 0) {
4188         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
4189       } else {
4190         __ lea(rscratch1, Address(base, disp));
4191         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
4192       }
4193     }
4194   %}
4195 
  // Zero a word-aligned region of cnt words starting at base, using an
  // 8-way unrolled store loop entered Duff's-device style via a computed
  // branch.  Clobbers cnt_reg, base_reg, rscratch1 and rscratch2.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= tmp1 (cnt is now a multiple of unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Computed branch into the unrolled loop: each str below encodes to one
    // 4-byte instruction, so (entry - 4 * remainder) lands on the store that
    // begins the final 'remainder' stores of the first pass.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    // Negative offsets: base_reg points one unroll-block past the words
    // being zeroed on this iteration.
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
4244 
  /// mov encodings
4246 
4247   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4248     MacroAssembler _masm(&cbuf);
4249     u_int32_t con = (u_int32_t)$src$$constant;
4250     Register dst_reg = as_Register($dst$$reg);
4251     if (con == 0) {
4252       __ movw(dst_reg, zr);
4253     } else {
4254       __ movw(dst_reg, con);
4255     }
4256   %}
4257 
4258   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4259     MacroAssembler _masm(&cbuf);
4260     Register dst_reg = as_Register($dst$$reg);
4261     u_int64_t con = (u_int64_t)$src$$constant;
4262     if (con == 0) {
4263       __ mov(dst_reg, zr);
4264     } else {
4265       __ mov(dst_reg, con);
4266     }
4267   %}
4268 
  // Materialize a pointer constant into dst, emitting the relocation
  // required by the operand (oop, metadata, or none).
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and 1 are handled by the dedicated encodings
      // aarch64_enc_mov_p0 / aarch64_enc_mov_p1; they must not reach here.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // Small constants (below the first page) fit a plain mov.
          __ mov(dst_reg, con);
        } else {
          // Otherwise form the address as page base (adrp) plus offset.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4293 
4294   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4295     MacroAssembler _masm(&cbuf);
4296     Register dst_reg = as_Register($dst$$reg);
4297     __ mov(dst_reg, zr);
4298   %}
4299 
4300   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4301     MacroAssembler _masm(&cbuf);
4302     Register dst_reg = as_Register($dst$$reg);
4303     __ mov(dst_reg, (u_int64_t)1);
4304   %}
4305 
  // Load the address of the safepoint polling page, with a poll_type
  // relocation so the runtime can find and patch it.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    // The polling page is page-aligned, so adrp alone must reach it exactly.
    assert(off == 0, "assumed offset == 0");
  %}
4314 
  // Load the card table byte map base into dst via the MacroAssembler
  // helper (which knows how to materialize it for the current GC config).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4319 
  // Materialize a narrow (compressed) oop constant into dst.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // Narrow null is handled by aarch64_enc_mov_n0; must not reach here.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4332 
4333   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4334     MacroAssembler _masm(&cbuf);
4335     Register dst_reg = as_Register($dst$$reg);
4336     __ mov(dst_reg, zr);
4337   %}
4338 
  // Materialize a narrow (compressed) klass constant into dst.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // A null klass constant is never valid here.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4351 
4352   // arithmetic encodings
4353 
4354   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4355     MacroAssembler _masm(&cbuf);
4356     Register dst_reg = as_Register($dst$$reg);
4357     Register src_reg = as_Register($src1$$reg);
4358     int32_t con = (int32_t)$src2$$constant;
4359     // add has primary == 0, subtract has primary == 1
4360     if ($primary) { con = -con; }
4361     if (con < 0) {
4362       __ subw(dst_reg, src_reg, -con);
4363     } else {
4364       __ addw(dst_reg, src_reg, con);
4365     }
4366   %}
4367 
4368   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4369     MacroAssembler _masm(&cbuf);
4370     Register dst_reg = as_Register($dst$$reg);
4371     Register src_reg = as_Register($src1$$reg);
4372     int32_t con = (int32_t)$src2$$constant;
4373     // add has primary == 0, subtract has primary == 1
4374     if ($primary) { con = -con; }
4375     if (con < 0) {
4376       __ sub(dst_reg, src_reg, -con);
4377     } else {
4378       __ add(dst_reg, src_reg, con);
4379     }
4380   %}
4381 
4382   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4383     MacroAssembler _masm(&cbuf);
4384    Register dst_reg = as_Register($dst$$reg);
4385    Register src1_reg = as_Register($src1$$reg);
4386    Register src2_reg = as_Register($src2$$reg);
4387     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4388   %}
4389 
4390   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4391     MacroAssembler _masm(&cbuf);
4392    Register dst_reg = as_Register($dst$$reg);
4393    Register src1_reg = as_Register($src1$$reg);
4394    Register src2_reg = as_Register($src2$$reg);
4395     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4396   %}
4397 
4398   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4399     MacroAssembler _masm(&cbuf);
4400    Register dst_reg = as_Register($dst$$reg);
4401    Register src1_reg = as_Register($src1$$reg);
4402    Register src2_reg = as_Register($src2$$reg);
4403     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4404   %}
4405 
4406   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4407     MacroAssembler _masm(&cbuf);
4408    Register dst_reg = as_Register($dst$$reg);
4409    Register src1_reg = as_Register($src1$$reg);
4410    Register src2_reg = as_Register($src2$$reg);
4411     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4412   %}
4413 
4414   // compare instruction encodings
4415 
4416   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4417     MacroAssembler _masm(&cbuf);
4418     Register reg1 = as_Register($src1$$reg);
4419     Register reg2 = as_Register($src2$$reg);
4420     __ cmpw(reg1, reg2);
4421   %}
4422 
4423   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4424     MacroAssembler _masm(&cbuf);
4425     Register reg = as_Register($src1$$reg);
4426     int32_t val = $src2$$constant;
4427     if (val >= 0) {
4428       __ subsw(zr, reg, val);
4429     } else {
4430       __ addsw(zr, reg, -val);
4431     }
4432   %}
4433 
4434   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4435     MacroAssembler _masm(&cbuf);
4436     Register reg1 = as_Register($src1$$reg);
4437     u_int32_t val = (u_int32_t)$src2$$constant;
4438     __ movw(rscratch1, val);
4439     __ cmpw(reg1, rscratch1);
4440   %}
4441 
4442   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4443     MacroAssembler _masm(&cbuf);
4444     Register reg1 = as_Register($src1$$reg);
4445     Register reg2 = as_Register($src2$$reg);
4446     __ cmp(reg1, reg2);
4447   %}
4448 
  // 64-bit compare against a 12-bit add/sub immediate, implemented as a
  // flag-setting arithmetic op whose result is discarded into zr.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      // Negative immediate: compare by adding its magnitude.
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: -val == val, so the
      // magnitude cannot be negated; materialize it and compare registers.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4463 
4464   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4465     MacroAssembler _masm(&cbuf);
4466     Register reg1 = as_Register($src1$$reg);
4467     u_int64_t val = (u_int64_t)$src2$$constant;
4468     __ mov(rscratch1, val);
4469     __ cmp(reg1, rscratch1);
4470   %}
4471 
4472   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4473     MacroAssembler _masm(&cbuf);
4474     Register reg1 = as_Register($src1$$reg);
4475     Register reg2 = as_Register($src2$$reg);
4476     __ cmp(reg1, reg2);
4477   %}
4478 
4479   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4480     MacroAssembler _masm(&cbuf);
4481     Register reg1 = as_Register($src1$$reg);
4482     Register reg2 = as_Register($src2$$reg);
4483     __ cmpw(reg1, reg2);
4484   %}
4485 
4486   enc_class aarch64_enc_testp(iRegP src) %{
4487     MacroAssembler _masm(&cbuf);
4488     Register reg = as_Register($src$$reg);
4489     __ cmp(reg, zr);
4490   %}
4491 
4492   enc_class aarch64_enc_testn(iRegN src) %{
4493     MacroAssembler _masm(&cbuf);
4494     Register reg = as_Register($src$$reg);
4495     __ cmpw(reg, zr);
4496   %}
4497 
4498   enc_class aarch64_enc_b(label lbl) %{
4499     MacroAssembler _masm(&cbuf);
4500     Label *L = $lbl$$label;
4501     __ b(*L);
4502   %}
4503 
4504   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4505     MacroAssembler _masm(&cbuf);
4506     Label *L = $lbl$$label;
4507     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4508   %}
4509 
4510   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4511     MacroAssembler _masm(&cbuf);
4512     Label *L = $lbl$$label;
4513     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4514   %}
4515 
  // Slow-path subtype check: walks the secondary supers of sub looking for
  // super.  Condition flags report the result; when $primary is set the
  // result register is additionally zeroed on the hit path.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     // NULL success label: on a hit, control falls through here instead.
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       // Hit path: clear the result register to signal success.
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4533 
  // Emit a direct Java call.  Calls without a resolved _method target a
  // runtime wrapper; otherwise a static or opt-virtual call plus the
  // to-interpreter stub is emitted.  Bails out the compile on code-cache
  // exhaustion.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      // Optimized-virtual and static calls use different relocation specs.
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        // Stub allocation failed: abandon this compilation.
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      // Trampoline emission failed: abandon this compilation.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4560 
  // Emit an inline-cache (virtual/interface) Java call.  Bails out the
  // compile on code-cache exhaustion.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      // IC call emission failed: abandon this compilation.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4570 
  // Post-call epilog; only emits code under +VerifyStackAtCalls.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find magic cookie on stack.
      // Not implemented on AArch64 yet.
      __ call_Unimplemented();
    }
  %}
4578 
  // Call from compiled Java into the runtime / a generated stub.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: a (possibly trampolined) direct call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        // Trampoline emission failed: abandon this compilation.
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Native/runtime target: describe the C calling convention for blrt.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4609 
  // Jump to the shared rethrow stub (may be out of branch range, hence far_jump).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4614 
  // Method return: branch to the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4619 
4620   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4621     MacroAssembler _masm(&cbuf);
4622     Register target_reg = as_Register($jump_target$$reg);
4623     __ br(target_reg);
4624   %}
4625 
4626   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4627     MacroAssembler _masm(&cbuf);
4628     Register target_reg = as_Register($jump_target$$reg);
4629     // exception oop should be in r0
4630     // ret addr has been popped into lr
4631     // callee expects it in r3
4632     __ mov(r3, lr);
4633     __ br(target_reg);
4634   %}
4635 
  // Inline fast-path monitor enter.  On exit the condition flags encode the
  // outcome: EQ => lock acquired, NE => caller must take the runtime slow path.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // oop is non-null, so this sets NE => always slow path
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    {
      // Hand-rolled CAS loop: on success (stlxr status == 0) fall through
      // to cont with EQ set by the cmp above.
      Label retry_load;
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; execution continues at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        // Hand-rolled CAS loop; the cmp leaves EQ on success, NE on failure.
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4777 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Inline fast-path monitor exit.  On exit the condition flags encode the
  // outcome: EQ => lock released, NE => caller must take the runtime slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        // Hand-rolled CAS: restore the displaced header if the mark still
        // points at our box.  EQ on success (via cbzw path), NE otherwise.
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp sets the flags reported at cont; the cbnz takes the slow
      // path if EntryList or cxq is non-empty.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4870 
4871 %}
4872 
4873 //----------FRAME--------------------------------------------------------------
4874 // Definition of frame structure and management information.
4875 //
4876 //  S T A C K   L A Y O U T    Allocators stack-slot number
4877 //                             |   (to get allocators register number
4878 //  G  Owned by    |        |  v    add OptoReg::stack0())
4879 //  r   CALLER     |        |
4880 //  o     |        +--------+      pad to even-align allocators stack-slot
4881 //  w     V        |  pad0  |        numbers; owned by CALLER
4882 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4883 //  h     ^        |   in   |  5
4884 //        |        |  args  |  4   Holes in incoming args owned by SELF
4885 //  |     |        |        |  3
4886 //  |     |        +--------+
4887 //  V     |        | old out|      Empty on Intel, window on Sparc
4888 //        |    old |preserve|      Must be even aligned.
4889 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4890 //        |        |   in   |  3   area for Intel ret address
4891 //     Owned by    |preserve|      Empty on Sparc.
4892 //       SELF      +--------+
4893 //        |        |  pad2  |  2   pad to align old SP
4894 //        |        +--------+  1
4895 //        |        | locks  |  0
4896 //        |        +--------+----> OptoReg::stack0(), even aligned
4897 //        |        |  pad1  | 11   pad to align new SP
4898 //        |        +--------+
4899 //        |        |        | 10
4900 //        |        | spills |  9   spills
4901 //        V        |        |  8   (pad0 slot for callee)
4902 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4903 //        ^        |  out   |  7
4904 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4905 //     Owned by    +--------+
4906 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4907 //        |    new |preserve|      Must be even-aligned.
4908 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4909 //        |        |        |
4910 //
4911 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4912 //         known from SELF's arguments and the Java calling convention.
4913 //         Region 6-7 is determined per call site.
4914 // Note 2: If the calling convention leaves holes in the incoming argument
4915 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
4917 //         incoming area, as the Java calling convention is completely under
4918 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4920 //         varargs C calling conventions.
4921 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4922 //         even aligned with pad0 as needed.
4923 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4924 //           (the latter is true on Intel but is it false on AArch64?)
4925 //         region 6-11 is even aligned; it may be padded out more so that
4926 //         the region from SP to FP meets the minimum stack alignment.
4927 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4928 //         alignment.  Region 11, pad1, may be dynamically extended so that
4929 //         SP meets the minimum alignment.
4930 
frame %{
  // Platform frame layout and calling-convention contract between
  // C2-compiled code, the interpreter, and C code.

  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 encodes SP on AArch64 — confirm against the
  // register definitions earlier in this file.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low halves of return registers, indexed by ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High halves; OptoReg::Bad marks single-slot (32-bit) values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5034 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// Defaults applied to every instruction unless overridden in the rule.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5052 
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------

// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4 (note: no lower bound in the predicate)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5213 
// Constant 63.
// NOTE(review): despite the immL_ name this matches ConI and reads
// get_int() — presumably used where a long-shift count appears as an
// int constant; confirm against the match rules before changing.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant 255 (0xff).
// NOTE(review): same naming quirk as immL_63 — matches ConI/get_int().
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit contiguous low-order-ones mask: value+1 is a power of 2 and
// the top two bits are clear.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit contiguous low-order-ones mask: value+1 is a power of 2 and
// the top two bits are clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5275 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long variant
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores, long variant
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5372 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (long constant equal to JavaThread::frame_anchor_offset() +
// JavaFrameAnchor::last_Java_pc_offset())

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5459 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment duplicated from immP_M1 — confirm the actual use.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5541 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant encodable as a packed FP immediate
// (Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant encodable as a packed FP immediate
// (Assembler::operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5602 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5633 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its siblings this operand has no explicit
// op_cost(0) — presumably relying on the op_attrib default; confirm.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}
5676 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5815 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5860 
5861 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5894 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5974 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6014 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6056 
//----------Memory Operands----------------------------------------------------
// NOTE(review): index(0xffffffff) appears to be the conventional
// "no index register" sentinel in AD memory interfaces — confirm
// against the ADLC documentation.

// [base]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + (long index << scale) + 12-bit unsigned int offset]
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + (long index << scale) + 12-bit unsigned long offset]
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + sign-extended int index + 12-bit unsigned long offset]
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// [base + (sign-extended int index << scale) + 12-bit unsigned long offset]
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + (sign-extended int index << scale)]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index << scale)]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + long index]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + int immediate offset]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base + long immediate offset]
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6198 
6199 
6200 operand indirectN(iRegN reg)
6201 %{
6202   predicate(Universe::narrow_oop_shift() == 0);
6203   constraint(ALLOC_IN_RC(ptr_reg));
6204   match(DecodeN reg);
6205   op_cost(0);
6206   format %{ "[$reg]\t# narrow" %}
6207   interface(MEMORY_INTER) %{
6208     base($reg);
6209     index(0xffffffff);
6210     scale(0x0);
6211     disp(0x0);
6212   %}
6213 %}
6214 
6215 operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
6216 %{
6217   predicate(Universe::narrow_oop_shift() == 0);
6218   constraint(ALLOC_IN_RC(ptr_reg));
6219   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
6220   op_cost(0);
6221   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
6222   interface(MEMORY_INTER) %{
6223     base($reg);
6224     index($lreg);
6225     scale($scale);
6226     disp($off);
6227   %}
6228 %}
6229 
6230 operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
6231 %{
6232   predicate(Universe::narrow_oop_shift() == 0);
6233   constraint(ALLOC_IN_RC(ptr_reg));
6234   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
6235   op_cost(INSN_COST);
6236   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
6237   interface(MEMORY_INTER) %{
6238     base($reg);
6239     index($lreg);
6240     scale($scale);
6241     disp($off);
6242   %}
6243 %}
6244 
// [base + sign-extended int index + offset] with a narrow oop base;
// the int index is widened via ConvI2L, no scaling.
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// As above but the widened int index is also shifted left by a constant
// scale before being added to the base.
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}
6274 
// [base + (sign-extended int index << scale)], narrow oop base, no offset.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index << scale)], narrow oop base, no offset.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
6304 
// [base + long index], narrow oop base, no scale and no offset.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + int immediate offset], narrow oop base.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}

// [base + long immediate offset], narrow oop base.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6349 
6350 
6351 
6352 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// [thread register + fixed pc-slot offset]: used by opto stubs to write
// the saved pc into the thread's JavaFrameAnchor.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6366 
6367 //----------Special Memory Operands--------------------------------------------
6368 // Stack Slot Operand - This operand is used for loading and storing temporary
6369 //                      values on the stack where a match requires a value to
6370 //                      flow through memory.
// Stack-slot operands: spill locations addressed as [sp + offset]. These
// are generated by the matcher itself, hence no match rule.
// NOTE(review): the base encoding 0x1e and the "RSP" comments are inherited
// from the x86 ad files; presumably 0x1e encodes the AArch64 sp in this
// file's register numbering -- confirm against the register definitions.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Int stack slot.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Float stack slot.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double stack slot.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Long stack slot.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6441 
6442 // Operands for expressing Control Flow
6443 // NOTE: Label is a predefined operand which should not be redefined in
6444 //       the AD file. It is generically handled within the ADLC.
6445 
6446 //----------Conditional Branch Operands----------------------------------------
6447 // Comparison Op  - This is the operation of the comparison, and is limited to
6448 //                  the following set of codes:
6449 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6450 //
6451 // Other attributes of the comparison, such as unsignedness, are specified
6452 // by the comparison instruction that sets a condition code flags register.
6453 // That result is represented by a flags operand whose subtype is appropriate
6454 // to the unsignedness (etc.) of the comparison.
6455 //
6456 // Later, the instruction which matches both the Comparison Op (a Bool) and
6457 // the flags (produced by the Cmp) specifies the coding of the comparison op
6458 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6459 
6460 // used for signed integral comparisons and fp comparisons
6461 
// Comparison operand for signed integral and fp comparisons. The hex
// values are the ARMv8 condition-code encodings (EQ=0x0, NE=0x1, LT=0xb,
// GE=0xa, LE=0xd, GT=0xc, VS=0x6, VC=0x7).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// Comparison operand for unsigned integral comparisons: the ordered
// relations map to the unsigned condition codes LO/HS/LS/HI instead.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6497 
6498 // Special operand allowing long args to int ops to be truncated for free
6499 
// Register operand matching (ConvL2I reg): lets int instructions consume
// the low 32 bits of a long register directly, eliding the l2i move.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
6510 
// Addressing modes usable by vector load/store instructions.
opclass vmem(indirect, indIndex, indOffI, indOffL);
6512 
6513 //----------OPERAND CLASSES----------------------------------------------------
6514 // Operand Classes are groups of operands that are used as to simplify
6515 // instruction definitions by not requiring the AD writer to specify
6516 // separate instructions for every form of operand when the
6517 // instruction accepts multiple operand types with the same basic
6518 // encoding and format. The classic case of this is memory operands.
6519 
6520 // memory is used to define read/write location for load/store
6521 // instruction defs. we can turn a memory op into an Address
6522 
// All scalar load/store addressing modes: the plain-pointer forms plus
// their narrow-oop (…N) counterparts defined above.
opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
6525 
6526 
6527 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6528 // operations. it allows the src to be either an iRegI or a (ConvL2I
6529 // iRegL). in the latter case the l2i normally planted for a ConvL2I
6530 // can be elided because the 32-bit instruction will just employ the
6531 // lower 32 bits anyway.
6532 //
6533 // n.b. this does not elide all L2I conversions. if the truncated
6534 // value is consumed by more than one operation then the ConvL2I
6535 // cannot be bundled into the consuming nodes so an l2i gets planted
6536 // (actually a movw $dst $src) and the downstream instructions consume
6537 // the result of the l2i as an iRegI input. That's a shame since the
6538 // movw is actually redundant but its not too costly.
6539 
// See the explanation above: an int source that may also be a truncated long.
opclass iRegIorL2I(iRegI, iRegL2I);
6541 
6542 //----------PIPELINE-----------------------------------------------------------
6543 // Rules which define the behavior of the target architectures pipeline.
6544 
6545 // For specific pipelines, eg A53, define the stages of that pipeline
6546 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names used in the pipe classes below onto the
// generic pipeline stages declared in pipe_desc (ISS=issue, EX1/EX2=execute,
// WR=writeback).
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6551 
// Pipeline model (dual-issue, in-order; bundle width per Cortex-A53).
pipeline %{

attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6567 
6568 // We don't use an actual pipeline model so don't care about resources
6569 // or description. we do use pipeline classes to introduce fixed
6570 // latencies
6571 
6572 //----------RESOURCES----------------------------------------------------------
6573 // Resources are the functional units available to the machine
6574 
resources( INS0, INS1, INS01 = INS0 | INS1,  // two issue slots; INS01 = either
           ALU0, ALU1, ALU = ALU0 | ALU1,    // two integer ALUs; ALU = either
           MAC,       // multiply/accumulate unit
           DIV,       // divide unit
           BRANCH,    // branch unit
           LDST,      // load/store unit
           NEON_FP);  // SIMD/FP unit

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
6588 
6589 //----------PIPELINE CLASSES---------------------------------------------------
6590 // Pipeline Classes describe the stages in which input and output are
6591 // referenced by the hardware pipeline.
6592 
// FP dyadic op, single precision: sources read in S1/S2, result in S5.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6630 
// FP conversion classes: all read the source in S1 and produce the result
// in S5 on the NEON/FP unit; they differ only in operand register types.

// double -> float
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6720 
// FP divide, single precision. Uses INS0: can only issue in slot 0.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision. Slot 0 only.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
6740 
// FP conditional select, single precision: flags and both sources read
// in S1, result available in S3.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision: result one stage later than a move.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6794 
// Vector multiply, 64-bit (D register): dual-issue capable.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit (Q register): issue slot 0 only.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit: dst is both read (accumulator) and
// written.
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit: slot 0 only.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6836 
// Vector integer dyadic op, 64-bit.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit: slot 0 only.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op, 64-bit: shorter latency than arithmetic (S3).
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit: slot 0 only.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6876 
// Vector shift by register, 64-bit.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit: slot 0 only.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit: slot 0 only.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6914 
// Vector FP dyadic op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit: slot 0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit: slot 0 only.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit: slot 0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit: slot 0 only.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
6963 
// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit: slot 0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate general register into all lanes, 64-bit.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate general register into all lanes, 128-bit.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into all lanes, 64-bit.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into all lanes, 128-bit.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double register into both lanes, 128-bit.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7026 
// Vector move-immediate, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit: slot 0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit: address operands needed at issue.
pipe_class vload_reg_mem64(vecD dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7060 
// Vector store, 64-bit: address at issue, data read in S2.
pipe_class vstore_reg_mem64(vecD src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7069 
// Vector store, 128-bit: address at issue, data read in S2.
// Fix: the source operand was declared vecD (64-bit); every other *128
// pipe class in this file (vload_reg_mem128, vmul128, vmla128, ...) uses
// vecX, so declare the 128-bit register class here as well.
pipe_class vstore_reg_mem128(vecX src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7078 
7079 //------- Integer ALU operations --------------------------
7080 
7081 // Integer ALU reg-reg operation
7082 // Operands needed in EX1, result generated in EX2
7083 // Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);  // shifted operand needed early
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written in EX2 but the ALU resource is held in EX1
// (unlike ialu_reg_reg which holds ALU in EX2) -- confirm this is intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7176 
7177 //------- Compare operation -------------------------------
7178 
7179 // Compare reg-reg
7180 // Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);  // flags produced in EX2
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7203 
7204 //------- Conditional instructions ------------------------
7205 
7206 // Conditional no operands
7207 // Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);  // flags consumed in EX1
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7241 
7242 //------- Multiply pipeline operations --------------------
7243 
7244 // Multiply reg-reg
7245 // Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);   // sources needed at issue for the MAC unit
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7294 
7295 //------- Divide pipeline operations --------------------
7296 
7297 // Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7320 
7321 //------- Load pipeline operations ------------------------
7322 
7323 // Load - prefetch
7324 // Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);  // address operands needed at issue
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7354 
7355 //------- Store pipeline operations -----------------------
7356 
7357 // Store - zr, mem
7358 // Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);  // address at issue
  src    : EX2(read);  // store data needed later, in EX2
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Note: despite the name, "dst" here is the address register (read, not
// written).
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7388 
//------- Branch pipeline operations ----------------------
7390 
7391 // Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);  // flags consumed by the branch
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7417 
7418 //------- Synchronisation operations ----------------------
7419 
7420 // Any operation requiring serialization.
7421 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);  // modelled as ten instructions
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7441 
7442 // Empty pipeline class
// Zero-latency placeholder; used for MachNop (see the define block below
// in the original file).
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}
7448 
7449 // Default pipeline class.
// Fallback class for instructions with no more specific pipe description.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}
7455 
7456 // Pipeline class for compares.
// Generic compare class; conservative fixed latency.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}
7462 
7463 // Pipeline class for memory operations.
// Generic memory-access class; conservative fixed latency.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}
7469 
7470 // Pipeline class for call.
// Calls are modeled with a large latency so the scheduler does not try to
// hide work behind them.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
7476 
7477 // Define the class for the Nop node.
define %{
   // Nops consume no pipeline resources.
   MachNop = pipe_class_empty;
%}
7481 
7482 %}
7483 //----------INSTRUCTIONS-------------------------------------------------------
7484 //
7485 // match      -- States which machine-independent subtree may be replaced
7486 //               by this instruction.
7487 // ins_cost   -- The estimated cost of this instruction is used by instruction
7488 //               selection to identify a minimum cost tree of machine
7489 //               instructions that matches a tree of machine-independent
7490 //               instructions.
7491 // format     -- A string providing the disassembly for this instruction.
7492 //               The value of an instruction's operand may be inserted
7493 //               by referring to it with a '$' prefix.
7494 // opcode     -- Three instruction opcodes may be provided.  These are referred
7495 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7497 //               indicate the type of machine instruction, while secondary
7498 //               and tertiary are often used for prefix options or addressing
7499 //               modes.
7500 // ins_encode -- A list of encode classes with parameters. The encode class
7501 //               name must have been defined in an 'enc_class' specification
7502 //               in the encode section of the architecture description.
7503 
7504 // ============================================================================
7505 // Memory (Load/Store) Instructions
7506 
7507 // Load Instructions
7508 
7509 // Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  // Plain (non-acquiring) form only; acquiring loads are matched by the
  // ldar-based rules in the volatile section below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n->in(1) is the LoadB under the ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  // ldrb zero-extends, so the ConvI2L folds into the load.
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7564 
7565 // Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  // Plain (non-acquiring) form; volatile variants live in the section below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  // n->in(1) is the LoadS under the ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  // ldrh zero-extends, so the ConvI2L folds into the load.
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7620 
7621 // Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrsw sign-extends the loaded word, folding the ConvI2L into the load.
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  // (ConvI2L(LoadI) & 0xFFFFFFFF): ldrw clears the upper 32 bits, so the
  // AndL/ConvI2L pair folds into a plain 32-bit load.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // Walk down through AndL -> ConvI2L to the LoadI.
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // NOTE(review): "# int" in the format text looks stale for a long load.
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7676 
7677 // Load Range
instruct loadRange(iRegINoSp dst, memory mem)
%{
  // No acquire predicate: array-length loads have no volatile variant here.
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7745 
7746 // Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  // FP loads use the generic memory pipe class, not iload_reg_mem.
  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7773 
7774 
7775 // Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
7800 
7801 // Load Pointer Constant
7802 
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // Higher cost: a general pointer constant may expand to a multi-insn
  // mov sequence (assumption — the encoding is defined elsewhere; confirm).
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7830 
7831 // Load Pointer Constant One
7832 
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed debug text: this rule materializes the pointer constant 1,
  // not NULL ("# NULL ptr" was a copy-paste from loadConP0).
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7844 
7845 // Load Poll Page Constant
7846 
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Materialized with a pc-relative adr rather than a mov sequence.
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
7872 
7873 // Load Narrow Pointer Constant
7874 
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
7914 
7915 // Load Packed Float Constant
7916 
// "Packed" = the constant is representable as an fmov immediate, so no
// constant-table load is needed.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
7945 
7946 // Load Packed Double Constant
7947 
// "Packed" = the constant is representable as an fmov immediate.
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
7958 
7959 // Load Double Constant
7960 
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed debug text: this loads a double ("float=" was a copy-paste
  // from loadConF).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
7975 
7976 // Store Instructions
7977 
7978 // Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  // Barrier-free form: only used when the StoreStore barrier is provably
  // unnecessary; otherwise the _ordered variant below matches.
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
8006 
8007 // Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  // Plain (non-releasing) form; releasing stores use the stlr rules below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8020 
8021 
// Store zero byte: stores the zero register directly, avoiding a
// constant materialization.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed debug text: the strb0 encoding stores zr (cf. storeimmCM0);
  // the format previously printed the misspelled "rscractch2".
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8034 
8035 // Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero half-word: stores zr, no constant materialization needed.
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
8061 
8062 // Store Integer
8063 
instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero word: stores zr, no constant materialization needed.
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8089 
8090 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // NOTE(review): "# int" in the format text looks stale for a long store.
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8117 
8118 // Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store NULL Pointer: stores zr directly.
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8145 
8146 // Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store compressed NULL: when both narrow-oop and narrow-klass bases are
// NULL, rheapbase holds zero, so it can be stored as the null value.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8174 
8175 // Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  // FP stores use the generic memory pipe class, not istore_reg_mem.
  ins_pipe(pipe_class_memory);
%}
8188 
8189 // TODO
8190 // implement storeImmF0 and storeFImmPacked
8191 
8192 // Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // (predicate listed before match here; order is not significant to adlc)
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8219 
8220 // TODO
8221 // implement storeImmD0 and storeDImmPacked
8222 
8223 // prefetch instructions
8224 // Must be safe to execute with invalid address (cannot fault).
8225 
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  // PSTL1KEEP = prefetch for store, L1, temporal; prfm never faults.
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8236 
8237 //  ---------------- volatile loads and stores ----------------
8238 
8239 // Load Byte (8 bit signed)
// Volatile loads use load-acquire (ldar*) with an indirect (register-only)
// address. They carry no predicate: the higher VOLATILE_REF_COST steers the
// matcher to the plain ldr forms whenever acquire semantics are not needed.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8328 
8329 // Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed debug text: the encoding is aarch64_enc_ldarsh (signed
  // load-acquire halfword); the format previously printed "ldarh",
  // the unsigned form.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8341 
8342 // Load Integer (32 bit signed)
// Acquiring (volatile) forms of the int/long/pointer/FP loads; see the
// comment on the plain forms above — cost steers selection.
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8432 
8433 // Store Byte
// Volatile stores use store-release (stlr*) with an indirect address.
// NOTE(review): these use pipe_class_memory while the volatile loads above
// use pipe_serial — confirm the asymmetry is intended.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8540 
8541 //  ---------------- end of volatile loads and stores ----------------
8542 
8543 // ============================================================================
8544 // BSWAP Instructions
8545 
// Byte-swap a 32-bit value with a single revw.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8558 
// Byte-swap a 64-bit value with a single rev.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8571 
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Signed variant: rev16w swaps the bytes, then sbfmw sign-extends the
// low 16 bits into the full word.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8599 
8600 // ============================================================================
8601 // Zero Count Instructions
8602 
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// AArch64 has no count-trailing-zeros instruction: reverse the bits
// (rbit) and count leading zeros instead.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8654 
8655 //---------- Population Count Instructions -------------------------------------
8656 //
8657 
// Population count (int) via the SIMD unit: zero the top 32 bits of
// src, copy it into an FP/SIMD register, count set bits per byte with
// cnt, sum the eight byte counts with addv, and move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // movw to self zeroes bits [63:32]; the low 32-bit int value is
    // unchanged, so writing into src is benign for an iRegIorL2I.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8679 
// Population count of an int loaded from memory: ldrs loads 32 bits
// straight into the FP/SIMD register (upper bits zeroed), avoiding the
// GPR round trip of the register form above.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // A temporary MacroAssembler over cbuf emits the addressed load.
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8701 
8702 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
// Population count (long) via the SIMD unit: copy all 64 bits into an
// FP/SIMD register, cnt per byte, addv to sum, move the count back.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8722 
// Population count of a long loaded from memory: ldrd loads the 64
// bits directly into the FP/SIMD register.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // A temporary MacroAssembler over cbuf emits the addressed load.
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8744 
8745 // ============================================================================
8746 // MemBar Instruction
8747 
// LoadFence: a barrier ordering prior loads before subsequent loads
// and stores (LoadLoad|LoadStore).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
8759 
// Elided MemBarAcquire: when unnecessary_acquire(n) determines the
// barrier is redundant, emit only an assembly comment at zero cost.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
8773 
// MemBarAcquire: loads before the barrier are ordered before all
// subsequent loads and stores (LoadLoad|LoadStore).
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
8787 
8788 
// MemBarAcquireLock emits no barrier, only a comment — presumably the
// lock-acquire path already provides the needed ordering (NOTE(review):
// the elision rationale is established outside this block; confirm
// against the monitor-enter code).
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
8801 
// StoreFence: prior loads and stores are ordered before subsequent
// stores (LoadStore|StoreStore).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8813 
// Elided MemBarRelease: when unnecessary_release(n) determines the
// barrier is redundant, emit only an assembly comment at zero cost.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
8826 
// MemBarRelease: prior loads and stores are ordered before subsequent
// stores (LoadStore|StoreStore).
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8839 
// MemBarStoreStore: orders prior stores before subsequent stores.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8851 
// MemBarReleaseLock emits no barrier, only a comment — presumably the
// lock-release path already provides the needed ordering (NOTE(review):
// confirm against the monitor-exit code).
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
8864 
// Elided MemBarVolatile: when unnecessary_volatile(n) determines the
// barrier is redundant, emit only an assembly comment at zero cost.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
8878 
// MemBarVolatile: the full StoreLoad barrier. The inflated cost
// (VOLATILE_REF_COST*100) presumably biases the matcher toward the
// elided variant above whenever its predicate holds — TODO confirm.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8892 
8893 // ============================================================================
8894 // Cast/Convert Instructions
8895 
// Reinterpret a long as a pointer: a register move, skipped when
// source and destination are already the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8910 
// Reinterpret a pointer as a long: a register move, skipped when
// source and destination are already the same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8925 
8926 // Convert oop into int for vectors alignment masking
// Convert oop into int for vectors alignment masking: movw keeps the
// low 32 bits of the pointer and zeroes the rest.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8938 
8939 // Convert compressed oop into int for vectors alignment masking
8940 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // Only valid when the narrow-oop shift is zero: the compressed oop
  // bits then equal the low 32 address bits, so a 32-bit move suffices.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format: it previously printed the literal text "dst" (the
  // '$' substitution marker was missing) and said "mov" although the
  // encoding emits movw. Debug-output only; no code-generation change.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8954 
8955 
8956 // Convert oop pointer into compressed form
// Compress an oop that may be null (predicate excludes the NotNull
// case, which the cheaper rule below handles). KILL cr: the
// encode_heap_oop macro is assumed to clobber flags — TODO confirm.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8970 
// Compress an oop statically known to be non-null.
// NOTE(review): cr is declared but there is no effect(KILL cr) —
// verify whether encode_heap_oop_not_null clobbers flags, and either
// add the KILL or drop the operand.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
8981 
// Decompress a narrow oop that may be null; the NotNull and Constant
// cases are excluded and handled by the cheaper rule below.
// NOTE(review): cr is declared but not KILLed — confirm whether
// decode_heap_oop clobbers flags.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8995 
// Decompress a narrow oop statically known to be non-null (or a
// constant), skipping the null check of the general rule above.
// NOTE(review): cr is declared but not KILLed — confirm whether
// decode_heap_oop_not_null clobbers flags.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9009 
9010 // n.b. AArch64 implementations of encode_klass_not_null and
9011 // decode_klass_not_null do not modify the flags register so, unlike
9012 // Intel, we don't kill CR as a side effect here
9013 
// Compress a klass pointer. Per the note above, the AArch64
// encode_klass_not_null does not modify flags, so no KILL cr here.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
9028 
// Decompress a narrow klass pointer. Per the note above this does not
// modify flags, so no KILL cr.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      // In-place decode uses the single-register macro variant.
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9047 
// CheckCastPP is a compile-time type assertion only: zero size, no
// code emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
9057 
// CastPP is a compile-time type assertion only: zero size, no code
// emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
9067 
// CastII is a compile-time type assertion only: zero size, zero cost,
// no code emitted.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9078 
9079 // ============================================================================
9080 // Atomic operation instructions
9081 //
9082 // Intel and SPARC both implement Ideal Node LoadPLocked and
9083 // Store{PIL}Conditional instructions using a normal load for the
9084 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9085 //
9086 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9087 // pair to lock object allocations from Eden space when not using
9088 // TLABs.
9089 //
9090 // There does not appear to be a Load{IL}Locked Ideal Node and the
9091 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9092 // and to use StoreIConditional only for 32-bit and StoreLConditional
9093 // only for 64-bit.
9094 //
9095 // We implement LoadPLocked and StorePLocked instructions using,
9096 // respectively the AArch64 hw load-exclusive and store-conditional
9097 // instructions. Whereas we must implement each of
9098 // Store{IL}Conditional using a CAS which employs a pair of
9099 // instructions comprising a load-exclusive followed by a
9100 // store-conditional.
9101 
9102 
9103 // Locked-load (linked load) of the current heap-top
9104 // used when updating the eden heap top
9105 // implemented using ldaxr on AArch64
9106 
// LoadPLocked: linked load of the heap top via ldaxr (load-exclusive
// with acquire), pairing with the store-conditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9119 
9120 // Conditional-store of the updated heap-top.
9121 // Used during allocation of the shared heap.
9122 // Sets flag (EQ) on success.
9123 // implemented using stlxr on AArch64.
9124 
// StorePConditional: store-conditional (stlxr, release form) of the
// updated heap top; per the format, flags end up EQ on success —
// the cmpw shown is emitted by the aarch64_enc_stlxr encoding
// (defined elsewhere in this file).
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9144 
9145 
9146 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9147 // when attempting to rebias a lock towards the current thread.  We
9148 // must use the acquire form of cmpxchg in order to guarantee acquire
9149 // semantics in this case.
// StoreLConditional: CAS via the acquiring cmpxchg encoding (see the
// comment above — acquire semantics are required for lock rebias).
// Flags are the result: EQ on a successful write.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9165 
9166 // storeIConditional also has acquire semantics, for no better reason
9167 // than matching storeLConditional.  At the time of writing this
9168 // comment storeIConditional was not used anywhere by AArch64.
// StoreIConditional: 32-bit CAS, acquire form to match the long
// variant above (see preceding comment). EQ flag on success.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9184 
9185 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9186 // can't match them
9187 
9188 // standard CompareAndSwapX when we are using barriers
9189 // these have higher priority than the rules selected by a predicate
9190 
// CompareAndSwapI (barrier-using variant): 32-bit cmpxchg, then cset
// materializes the success flag (1/0) into res. Flags are clobbered.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9208 
// CompareAndSwapL (barrier-using variant): 64-bit cmpxchg, then cset
// materializes the success flag (1/0) into res. Flags are clobbered.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9226 
// CompareAndSwapP (barrier-using variant): 64-bit pointer cmpxchg,
// then cset materializes the success flag (1/0) into res.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9244 
// CompareAndSwapN (barrier-using variant): 32-bit narrow-oop cmpxchg,
// then cset materializes the success flag (1/0) into res.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9262 
9263 // alternative CompareAndSwapX when we are eliding barriers
9264 
// CompareAndSwapI, acquiring variant: selected when barriers are being
// elided (needs_acquiring_load_exclusive); lower cost than the plain
// rule above so the matcher prefers it when the predicate holds.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9283 
// CompareAndSwapL, acquiring variant (see the int variant above for
// the predicate/cost rationale).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9302 
// CompareAndSwapP, acquiring variant (see the int variant above for
// the predicate/cost rationale).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9321 
// CompareAndSwapN, acquiring variant (see the int variant above for
// the predicate/cost rationale).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9340 
9341 
// GetAndSetI: atomic 32-bit exchange; prev receives the old value.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9350 
// GetAndSetL: atomic 64-bit exchange; prev receives the old value.
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9359 
// GetAndSetN: atomic 32-bit exchange of a narrow oop.
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9368 
// GetAndSetP: atomic 64-bit exchange of a pointer.
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9377 
9378 
// GetAndAddL: atomic 64-bit fetch-and-add with a register increment;
// newval receives the previous memory value.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9388 
// GetAndAddL when the fetched value is unused: pass noreg so no result
// register is written; slightly cheaper than the rule above.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9399 
// GetAndAddL with an immediate increment (immLAddSub range).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9409 
// GetAndAddL, immediate increment, fetched value unused (noreg).
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9420 
// GetAndAddI: atomic 32-bit fetch-and-add with a register increment;
// newval receives the previous memory value.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9430 
// GetAndAddI when the fetched value is unused (noreg result).
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9441 
// GetAndAddI with an immediate increment (immIAddSub range).
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9451 
// GetAndAddI, immediate increment, fetched value unused (noreg).
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9462 
9463 // Manifest a CmpL result in an integer register.
9464 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// cmp sets flags; csetw NE gives 1 if unequal else 0; cnegw LT then
// negates that to -1 when src1 < src2, yielding -1/0/1.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9485 
// CmpL3 against an add/sub-encodable immediate. A negative constant
// cannot be encoded directly in subs, so compare via adds with the
// negated value instead; the csetw/cnegw tail mirrors the register
// rule above.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // immLAddSub presumably restricts con to a range where -con cannot
    // overflow — TODO confirm against the operand definition.
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9510 
9511 // ============================================================================
9512 // Conditional Move Instructions
9513 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9523 
// CMoveI, signed compare: cselw picks src2 when the condition holds,
// src1 otherwise (note the operand order in the csel).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9539 
// CMoveI, unsigned compare: identical encoding to the signed rule;
// see the comment above on why both flavours must exist.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9555 
9556 // special cases where one arg is zero
9557 
9558 // n.b. this is selected in preference to the rule above because it
9559 // avoids loading constant 0 into a source register
9560 
9561 // TODO
9562 // we ought only to be able to cull one of these variants as the ideal
9563 // transforms ought always to order the zero consistently (to left/right?)
9564 
// CMoveI with a zero first arm, signed compare: use zr instead of
// materializing constant 0 in a register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9580 
// CMoveI with a zero first arm, unsigned compare: zr variant, same
// encoding as the signed rule above.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9596 
// CMoveI with a zero second arm, signed compare: zr replaces the
// constant 0 source.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9612 
9613 instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
9614   match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
9615 
9616   ins_cost(INSN_COST * 2);
9617   format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}
9618 
9619   ins_encode %{
9620     __ cselw(as_Register($dst$$reg),
9621              zr,
9622              as_Register($src$$reg),
9623              (Assembler::Condition)$cmp$$cmpcode);
9624   %}
9625 
9626   ins_pipe(icond_reg);
9627 %}
9628 
9629 // special case for creating a boolean 0 or 1
9630 
9631 // n.b. this is selected in preference to the rule above because it
9632 // avoids loading constants 0 and 1 into a source register
9633 
// cmovI_reg_zero_one: materialize a boolean from the flags with no source
// registers at all.  csinc Rd, Rn, Rm, cond is Rd = cond ? Rn : Rm + 1, so
// csincw(dst, zr, zr, cmp) gives dst = $cmp ? 0 : 1 -- i.e. the CMoveI of
// (one, zero) selected by $cmp.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// As cmovI_reg_zero_one, but selected by an unsigned comparison.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9671 
// cmovL_reg_reg: 64-bit conditional move, signed comparison.
// csel(dst, src2, src1, cmp) => dst = $cmp ? $src2 : $src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As cmovL_reg_reg, but selected by an unsigned comparison.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Condition-true operand is constant 0: dst = $cmp ? 0 : $src (zr avoids
// materializing the constant).
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovL_reg_zero, but selected by an unsigned comparison.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Condition-false operand is constant 0: dst = $cmp ? $src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovL_zero_reg, but selected by an unsigned comparison.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9769 
// cmovP_reg_reg: conditional move of (uncompressed, 64-bit) pointers,
// signed comparison.  csel(dst, src2, src1, cmp) => dst = $cmp ? $src2 : $src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// As cmovP_reg_reg, but selected by an unsigned comparison.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Condition-true operand is null (immP0): dst = $cmp ? null : $src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovP_reg_zero, but selected by an unsigned comparison.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Condition-false operand is null: dst = $cmp ? $src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovP_zero_reg, but selected by an unsigned comparison.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9867 
// cmovN_reg_reg: conditional move of compressed (32-bit) pointers, signed
// comparison; uses the 32-bit cselw since narrow oops occupy the low word.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9883 
// cmovUN_reg_reg: conditional move of compressed (32-bit) pointers selected
// by an unsigned comparison (cmpOpU/rFlagsRegU).
// Fix: the format string previously said "# signed, compressed ptr" --
// this is the unsigned variant, so label it to match its sibling rules.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9899 
// special cases where one arg is zero

// Condition-true operand is narrow null (immN0): dst = $cmp ? 0 : $src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovN_reg_zero, but selected by an unsigned comparison.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Condition-false operand is narrow null: dst = $cmp ? $src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As cmovN_zero_reg, but selected by an unsigned comparison.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9965 
// cmovF_reg: conditional move of single-precision floats via fcsel.
// fcsels(dst, src2, src1, cond) => dst = $cmp ? $src2 : $src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// As cmovF_reg, but selected by an unsigned comparison.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10001 
// cmovD_reg: conditional move of double-precision floats via fcseld.
// fcseld(dst, src2, src1, cond) => dst = $cmp ? $src2 : $src1.
// Fix: format comment previously said "cmove float" -- this is the
// CMoveD (double) rule; label it accordingly.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10019 
// cmovUD_reg: as cmovD_reg, but selected by an unsigned comparison.
// Fix: format comment previously said "cmove float" -- this is the
// CMoveD (double) rule; label it accordingly.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10037 
10038 // ============================================================================
10039 // Arithmetic Instructions
10040 //
10041 
10042 // Integer Addition
10043 
10044 // TODO
10045 // these currently employ operations which do not set CR and hence are
10046 // not flagged as killing CR but we would like to isolate the cases
10047 // where we want to set flags from those where we don't. need to work
10048 // out how to do that.
10049 
// addI_reg_reg: 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// addI_reg_imm: 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// addI_reg_imm_i2l: as addI_reg_imm, but the register operand is a long
// narrowed via ConvL2I; the 32-bit addw reads only the low word, so no
// explicit narrowing instruction is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10092 
10093 // Pointer Addition
// addP_reg_reg: pointer plus 64-bit offset register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// addP_reg_reg_ext: pointer plus sign-extended int offset, folding the
// ConvI2L into the add's sxtw extend operand.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// addP_reg_reg_lsl: pointer plus shifted long index, folded into a single
// lea with a scaled (lsl) register offset.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// addP_reg_reg_ext_shift: pointer plus sign-extended-and-scaled int index,
// folded into a single lea with an sxtw extended register offset.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10153 
// lshift_ext: (long)(int)src << scale collapsed into one sbfiz, which
// sign-extends the low field and shifts it into position.  The width
// argument is capped at 32 since only 32 significant bits come from the
// int source.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10168 
10169 // Pointer Immediate Addition
10170 // n.b. this needs to be more expensive than using an indirect memory
10171 // operand
// addP_reg_imm: pointer plus add/sub-encodable immediate offset.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10185 
10186 // Long Addition
// addL_reg_reg: 64-bit integer add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10202 
// Long Immediate Addition. No constant pool entries required.
// addL_reg_imm: 64-bit integer add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10217 
10218 // Integer Subtraction
// subI_reg_reg: 32-bit integer subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// subI_reg_imm: 32-bit integer subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10248 
10249 // Long Subtraction
// subL_reg_reg: 64-bit integer subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10265 
// Long Immediate Subtraction. No constant pool entries required.
// subL_reg_imm: 64-bit integer subtract, register - add/sub-encodable
// immediate.
// Fix: format string was "sub$dst, ..." -- missing the space between the
// mnemonic and the destination operand in the disassembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10280 
10281 // Integer Negation (special case for sub)
10282 
// negI_reg: 32-bit negation, matched from (0 - src).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// negL_reg: 64-bit negation, matched from (0L - src).
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10312 
10313 // Integer Multiply
10314 
// mulI: 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// smulI: 32x32->64 signed multiply -- a long multiply of two sign-extended
// ints collapses to a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10344 
10345 // Long Multiply
10346 
// mulL: 64-bit integer multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// mulHiL_rReg: high 64 bits of the signed 64x64->128 product (MulHiL),
// via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10377 
10378 // Combined Integer Multiply & Add/Sub
10379 
// maddI: fused 32-bit multiply-add, dst = src3 + src1 * src2.
// Fix: format said "madd" (the 64-bit mnemonic) but the encoder emits the
// 32-bit maddw; make the disassembly output match the emitted instruction.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10395 
// msubI: fused 32-bit multiply-subtract, dst = src3 - src1 * src2.
// Fix: format said "msub" (the 64-bit mnemonic) but the encoder emits the
// 32-bit msubw; make the disassembly output match the emitted instruction.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10411 
10412 // Combined Long Multiply & Add/Sub
10413 
// maddL: fused 64-bit multiply-add, dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// msubL: fused 64-bit multiply-subtract, dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10445 
10446 // Integer Divide
10447 
// divI: 32-bit signed divide (Java semantics handled by the shared encoding).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// signExtract: (src >> 31) >>> 31 is just the sign bit, so a single
// logical shift right by 31 suffices.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// div2Round: src + (sign bit of src), the rounding adjustment used when
// dividing by 2 -- folded into one addw with a shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10481 
10482 // Long Divide
10483 
// divL: 64-bit signed divide (Java semantics handled by the shared encoding).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// signExtractL: (src >> 63) >>> 63 is just the sign bit, so a single
// logical shift right by 63 suffices.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}

// div2RoundL: src + (sign bit of src), the rounding adjustment used when
// dividing by 2 -- folded into one add with a shifted operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10517 
10518 // Integer Remainder
10519 
// modI: 32-bit remainder via the classic sdiv/msub pair:
// rscratch1 = src1 / src2; dst = src1 - rscratch1 * src2.
// Fix: format string had a stray "(" after the msubw mnemonic
// ("msubw($dst...") which garbled the disassembly output.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10530 
10531 // Long Remainder
10532 
// modL: 64-bit remainder via the classic sdiv/msub pair:
// rscratch1 = src1 / src2; dst = src1 - rscratch1 * src2.
// Fixes: stray "(" after the msub mnemonic in the format string, and a
// missing "\t" after the "\n" (inconsistent with modI's format).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10543 
10544 // Integer Shifts
10545 
10546 // Shift Left Register
10547 instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10548   match(Set dst (LShiftI src1 src2));
10549 
10550   ins_cost(INSN_COST * 2);
10551   format %{ "lslvw  $dst, $src1, $src2" %}
10552 
10553   ins_encode %{
10554     __ lslvw(as_Register($dst$$reg),
10555              as_Register($src1$$reg),
10556              as_Register($src2$$reg));
10557   %}
10558 
10559   ins_pipe(ialu_reg_reg_vshift);
10560 %}
10561 
10562 // Shift Left Immediate
10563 instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10564   match(Set dst (LShiftI src1 src2));
10565 
10566   ins_cost(INSN_COST);
10567   format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}
10568 
10569   ins_encode %{
10570     __ lslw(as_Register($dst$$reg),
10571             as_Register($src1$$reg),
10572             $src2$$constant & 0x1f);
10573   %}
10574 
10575   ins_pipe(ialu_reg_shift);
10576 %}
10577 
10578 // Shift Right Logical Register
10579 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10580   match(Set dst (URShiftI src1 src2));
10581 
10582   ins_cost(INSN_COST * 2);
10583   format %{ "lsrvw  $dst, $src1, $src2" %}
10584 
10585   ins_encode %{
10586     __ lsrvw(as_Register($dst$$reg),
10587              as_Register($src1$$reg),
10588              as_Register($src2$$reg));
10589   %}
10590 
10591   ins_pipe(ialu_reg_reg_vshift);
10592 %}
10593 
10594 // Shift Right Logical Immediate
10595 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
10596   match(Set dst (URShiftI src1 src2));
10597 
10598   ins_cost(INSN_COST);
10599   format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}
10600 
10601   ins_encode %{
10602     __ lsrw(as_Register($dst$$reg),
10603             as_Register($src1$$reg),
10604             $src2$$constant & 0x1f);
10605   %}
10606 
10607   ins_pipe(ialu_reg_shift);
10608 %}
10609 
10610 // Shift Right Arithmetic Register
// Arithmetic (sign-filling) right shift of an int by a register amount.
// ASRVW takes the shift count modulo 32 in hardware, matching Java's
// masking of the count, so no explicit AND is needed.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10625 
10626 // Shift Right Arithmetic Immediate
// Arithmetic right shift of an int by a constant; the shift amount is
// masked to 5 bits (0..31), per Java int shift semantics.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10641 
10642 // Combined Int Mask and Right Shift (using UBFM)
10643 // TODO
10644 
10645 // Long Shifts
10646 
10647 // Shift Left Register
// Left shift of a long by a register amount. LSLV uses the shift
// register's value modulo 64, matching Java's masking of the long
// shift count (0..63).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10662 
10663 // Shift Left Immediate
// Left shift of a long by a constant; the shift amount is masked to
// 6 bits (0..63), per Java long shift semantics (JLS 15.19).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    // Continuation arguments re-aligned with the opening paren,
    // consistent with the sibling lsr/asr immediate rules below.
    __ lsl(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10678 
10679 // Shift Right Logical Register
// Logical (zero-filling) right shift of a long by a register amount.
// LSRV uses the count modulo 64 in hardware, matching Java semantics.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10694 
10695 // Shift Right Logical Immediate
// Logical right shift of a long by a constant; the shift amount is
// masked to 6 bits (0..63), per Java long shift semantics.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10710 
10711 // A special-case pattern for card table stores.
// Matches a logical right shift of a pointer reinterpreted as a long
// (CastP2X), the shape emitted by card-table store barriers when
// computing a card index from an address.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10726 
10727 // Shift Right Arithmetic Register
// Arithmetic (sign-filling) right shift of a long by a register amount.
// ASRV uses the count modulo 64 in hardware, matching Java semantics.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10742 
10743 // Shift Right Arithmetic Immediate
// Arithmetic right shift of a long by a constant; the shift amount is
// masked to 6 bits (0..63), per Java long shift semantics.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10758 
10759 // BEGIN This section of the file is automatically generated. Do not edit --------------
10760 
// NOTE(review): these rules sit in the auto-generated region (see the
// BEGIN marker above); presumably produced from a template (aarch64_ad.m4)
// — confirm and change the generator rather than hand-editing.
//
// dst = ~src1 (long). XorL with -1 is bitwise NOT; EON with the zero
// register computes src1 ^ ~0.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// dst = ~src1 (int) — 32-bit EONW with the zero register.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10793 
// dst = src1 & ~src2 (int) — BICW (AND with complement).
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2 (long) — BIC.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10827 
// dst = src1 | ~src2 (int) — ORNW (OR with complement).
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (long) — ORN.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10861 
// dst = ~(src1 ^ src2) (int) — EONW; matched as -1 ^ (src2 ^ src1).
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (long) — EON; matched as -1 ^ (src2 ^ src1).
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10895 
// dst = src1 & ~(src2 >>> src3) (int) — BICW with LSR-shifted operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3) (long) — BIC with LSR-shifted operand.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) (int) — BICW with ASR-shifted operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) (long) — BIC with ASR-shifted operand.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3) (int) — BICW with LSL-shifted operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3) (long) — BIC with LSL-shifted operand.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11003 
// dst = ~(src1 ^ (src2 >>> src3)) (int) — EONW with LSR-shifted operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >>> src3)) (long) — EON with LSR-shifted operand.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3)) (int) — EONW with ASR-shifted operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3)) (long) — EON with ASR-shifted operand.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3)) (int) — EONW with LSL-shifted operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3)) (long) — EON with LSL-shifted operand.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11111 
// dst = src1 | ~(src2 >>> src3) (int) — ORNW with LSR-shifted operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3) (long) — ORN with LSR-shifted operand.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (int) — ORNW with ASR-shifted operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) (long) — ORN with ASR-shifted operand.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (int) — ORNW with LSL-shifted operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) (long) — ORN with LSL-shifted operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11219 
// dst = src1 & (src2 >>> src3) (int) — ANDW with LSR-shifted operand.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3) (long). "andr" is the macro-assembler's
// name for the 64-bit AND ("and" is a C++ alternative operator token).
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (int) — ANDW with ASR-shifted operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) (long) — AND with ASR-shifted operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (int) — ANDW with LSL-shifted operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) (long) — AND with LSL-shifted operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11333 
// dst = src1 ^ (src2 >>> src3) (int) — EORW with LSR-shifted operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3) (long) — EOR with LSR-shifted operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (int) — EORW with ASR-shifted operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) (long) — EOR with ASR-shifted operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (int) — EORW with LSL-shifted operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) (long) — EOR with LSL-shifted operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11447 
// dst = src1 | (src2 >>> src3) (int) — ORRW with LSR-shifted operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3) (long) — ORR with LSR-shifted operand.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (int) — ORRW with ASR-shifted operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) (long) — ORR with ASR-shifted operand.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (int) — ORRW with LSL-shifted operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) (long) — ORR with LSL-shifted operand.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11561 
// Add/Subtract with a constant-shifted second operand.  Same folding idea
// as the Or rules above: the shift of src2 is encoded into the
// add/sub shifted-register form (LSR = URShift, ASR = RShift, LSL = LShift).
// Shift amounts are masked to the legal range: 0x1f for 32-bit ("w")
// forms, 0x3f for 64-bit forms.

// dst = src1 + (src2 >>> src3)   (32-bit)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3)   (64-bit)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3)   (32-bit)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3)   (64-bit)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3)   (32-bit)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3)   (64-bit)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3)   (32-bit)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3)   (64-bit)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3)   (32-bit)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3)   (64-bit)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3)   (32-bit)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3)   (64-bit)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11789 
11790 
11791 
11792 // Shift Left followed by Shift Right.
11793 // This idiom is used by the compiler for the i2b bytecode etc.
// Matches (src << lshift) >> rshift (signed) and emits a single SBFM
// with immr = (rshift - lshift) & 63 and imms = 63 - lshift.
// The predicate walks the ideal graph: n->in(2) is rshift_count,
// n->in(1)->in(2) is lshift_count; both must fit the 6-bit field.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: shift counts limited to 0..31, imms = 31 - lshift.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned (logical) counterpart of sbfmL: matches URShiftL and emits UBFM.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned counterpart of sbfmwI: matches URShiftI and emits UBFMW.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11883 // Bitfield extract with shift & mask
11884 
// (src >>> rshift) & mask, where the immI_bitmask operand guarantees the
// mask has the form 2^width - 1, so the whole expression is a single
// UBFXW (unsigned bitfield extract) of `width` bits starting at `rshift`.
// NOTE(review): the format string omits $rshift; it is a debug-print
// string only and does not affect code generation.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // mask is 2^width - 1 (immI_bitmask)
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant: (src >>> rshift) & mask -> UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // mask is 2^width - 1 (immL_bitmask)
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends, so the ConvI2L comes for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11933 
11934 // Rotations
11935 
// Rotate-by-constant via EXTR: (src1 << lshift) op (src2 >>> rshift) is a
// single extract when lshift + rshift == word size, which each predicate
// checks by requiring the sum to be 0 modulo 63/31-masked word width.
// When src1 == src2 this is a rotate right by rshift.

instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant: emits EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same extract idiom with AddL combining the halves (disjoint bits, so
// add and or are equivalent here).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit AddI variant of the extract idiom.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11995 
11996 
11997 // rol expander
11998 
// 64-bit rotate-left expander.  AArch64 has no rol instruction; rotate
// left by s is implemented as rotate right by -s, so the shift is negated
// into rscratch1 first.  Expander-only: no match rule; used by the
// rol*_rReg_Var_* patterns below.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rorv only uses the low bits, so this
    // rotates left by `shift`.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// 32-bit rotate-left expander (rorvw on the negated shift).

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Long rotate-left idiom: (x << s) | (x >>> (64 - s)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Long rotate-left idiom with (0 - s): equivalent mod 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12046 
// Int rotate-left idiom: (x << s) | (x >>> (32 - s)).
// FIX: this rule previously declared iRegLNoSp dst / iRegL src and
// expanded to the 64-bit rolL_rReg, although it matches the 32-bit OrI
// tree (every other OrI rule in this file uses int register classes).
// As written the int-rotate pattern could never be selected correctly;
// use int registers and the 32-bit expander rolI_rReg instead.
// NOTE(review): this section is auto-generated -- apply the same fix to
// the generator source so it is not lost on regeneration.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12055 
// Int rotate-left idiom with (0 - s): equivalent mod 32.
// FIX: as with rolI_rReg_Var_C_32, this rule wrongly used long register
// classes (iRegLNoSp/iRegL) and the 64-bit rolL_rReg expander for a
// 32-bit OrI pattern; corrected to int registers and rolI_rReg.
// NOTE(review): this section is auto-generated -- apply the same fix to
// the generator source so it is not lost on regeneration.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12064 
// 64-bit rotate-right expander.  Maps directly onto RORV; no negation
// needed (contrast with the rol expanders above), hence the lower cost.
// Expander-only: no match rule; used by the ror*_rReg_Var_* patterns.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// 32-bit rotate-right expander (RORVW).

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Long rotate-right idiom: (x >>> s) | (x << (64 - s)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Long rotate-right idiom with (0 - s): equivalent mod 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12112 
// Int rotate-right idiom: (x >>> s) | (x << (32 - s)).
// FIX: this rule previously declared iRegLNoSp dst / iRegL src and
// expanded to rolL_rReg -- the wrong width (64-bit) AND the wrong
// direction (rotate left) for a 32-bit rotate-right pattern.
// Corrected to int register classes and the rorI_rReg expander.
// NOTE(review): this section is auto-generated -- apply the same fix to
// the generator source so it is not lost on regeneration.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12121 
// Int rotate-right idiom with (0 - s): equivalent mod 32.
// FIX: as with rorI_rReg_Var_C_32, this rule wrongly used long register
// classes and expanded to rolL_rReg (wrong width and wrong direction);
// corrected to int registers and the rorI_rReg expander.
// NOTE(review): this section is auto-generated -- apply the same fix to
// the generator source so it is not lost on regeneration.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12130 
12131 // Add/subtract (extended)
12132 
// long +/- (long)int: fold the ConvI2L into the add/sub extended-register
// form (sxtw) so no separate sign-extension instruction is needed.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Subtract counterpart of AddExtI: dst = src1 - (long)src2.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12158 
12159 
// Add with an extension idiom as second operand: (x << n) >> n with a
// signed shift is a sign-extension (sxtb for n == 24/56, sxth for
// n == 16/48, sxtw for n == 32), and with an unsigned shift a
// zero-extension (uxtb).  Each rule folds the idiom into the add
// extended-register form.

// int: src1 + (short)src2  (lshift == rshift == 16)
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src1 + (byte)src2  (lshift == rshift == 24)
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src1 + (src2 & 0xff via shifts)  -- unsigned shift, so uxtb
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (short)src2  (lshift == rshift == 48)
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (int)src2  (lshift == rshift == 32)
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (byte)src2  (lshift == rshift == 56)
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (src2 & 0xff via shifts)  -- unsigned shift, so uxtb
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12250 
12251 
// Add/Sub with a masked second operand: (src2 & 0xff / 0xffff /
// 0xffffffff) is a zero-extension, folded into the add/sub
// extended-register form (uxtb / uxth / uxtw).

// int: src1 + (src2 & 0xff)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src1 + (src2 & 0xffff)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (src2 & 0xff)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (src2 & 0xffff)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 + (src2 & 0xffffffff)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src1 - (src2 & 0xff)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src1 - (src2 & 0xffff)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 - (src2 & 0xff)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 - (src2 & 0xffff)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src1 - (src2 & 0xffffffff)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12381 
12382 // END This section of the file is automatically generated. Do not edit --------------
12383 
12384 // ============================================================================
12385 // Floating Point Arithmetic Instructions
12386 
// Scalar FP add/sub/mul, register-register forms.  Single-precision
// rules use the "s" mnemonics and the fp_dop_reg_reg_s pipe class;
// double-precision use "d".  Multiplies carry a higher cost (6x) than
// add/sub (5x).

// float: dst = src1 + src2
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double: dst = src1 + src2
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float: dst = src1 - src2
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double: dst = src1 - src2
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float: dst = src1 * src2
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double: dst = src1 * src2
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12476 
// We cannot use these fused mul with add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12482 
12483 
12484 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12485 //   match(Set dst (AddF (MulF src1 src2) src3));
12486 
12487 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12488 
12489 //   ins_encode %{
12490 //     __ fmadds(as_FloatRegister($dst$$reg),
12491 //              as_FloatRegister($src1$$reg),
12492 //              as_FloatRegister($src2$$reg),
12493 //              as_FloatRegister($src3$$reg));
12494 //   %}
12495 
12496 //   ins_pipe(pipe_class_default);
12497 // %}
12498 
12499 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12500 //   match(Set dst (AddD (MulD src1 src2) src3));
12501 
12502 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12503 
12504 //   ins_encode %{
12505 //     __ fmaddd(as_FloatRegister($dst$$reg),
12506 //              as_FloatRegister($src1$$reg),
12507 //              as_FloatRegister($src2$$reg),
12508 //              as_FloatRegister($src3$$reg));
12509 //   %}
12510 
12511 //   ins_pipe(pipe_class_default);
12512 // %}
12513 
12514 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12515 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12516 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12517 
12518 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12519 
12520 //   ins_encode %{
12521 //     __ fmsubs(as_FloatRegister($dst$$reg),
12522 //               as_FloatRegister($src1$$reg),
12523 //               as_FloatRegister($src2$$reg),
12524 //              as_FloatRegister($src3$$reg));
12525 //   %}
12526 
12527 //   ins_pipe(pipe_class_default);
12528 // %}
12529 
12530 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12531 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12532 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12533 
12534 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12535 
12536 //   ins_encode %{
12537 //     __ fmsubd(as_FloatRegister($dst$$reg),
12538 //               as_FloatRegister($src1$$reg),
12539 //               as_FloatRegister($src2$$reg),
12540 //               as_FloatRegister($src3$$reg));
12541 //   %}
12542 
12543 //   ins_pipe(pipe_class_default);
12544 // %}
12545 
12546 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12547 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12548 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12549 
12550 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12551 
12552 //   ins_encode %{
12553 //     __ fnmadds(as_FloatRegister($dst$$reg),
12554 //                as_FloatRegister($src1$$reg),
12555 //                as_FloatRegister($src2$$reg),
12556 //                as_FloatRegister($src3$$reg));
12557 //   %}
12558 
12559 //   ins_pipe(pipe_class_default);
12560 // %}
12561 
12562 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12563 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12564 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12565 
12566 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12567 
12568 //   ins_encode %{
12569 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12570 //                as_FloatRegister($src1$$reg),
12571 //                as_FloatRegister($src2$$reg),
12572 //                as_FloatRegister($src3$$reg));
12573 //   %}
12574 
12575 //   ins_pipe(pipe_class_default);
12576 // %}
12577 
12578 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12579 //   match(Set dst (SubF (MulF src1 src2) src3));
12580 
12581 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12582 
12583 //   ins_encode %{
12584 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12585 //                as_FloatRegister($src1$$reg),
12586 //                as_FloatRegister($src2$$reg),
12587 //                as_FloatRegister($src3$$reg));
12588 //   %}
12589 
12590 //   ins_pipe(pipe_class_default);
12591 // %}
12592 
12593 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12594 //   match(Set dst (SubD (MulD src1 src2) src3));
12595 
12596 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12597 
12598 //   ins_encode %{
12599 //   // n.b. insn name should be fnmsubd
12600 //     __ fnmsub(as_FloatRegister($dst$$reg),
12601 //                as_FloatRegister($src1$$reg),
12602 //                as_FloatRegister($src2$$reg),
12603 //                as_FloatRegister($src3$$reg));
12604 //   %}
12605 
12606 //   ins_pipe(pipe_class_default);
12607 // %}
12608 
12609 
// Single-precision FP divide: dst = src1 / src2 (fdivs).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  // divide is much more expensive than add/sub/mul
  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide: dst = src1 / src2 (fdivd).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  // double divide is costed higher still than single divide
  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12639 
// Single-precision FP negate: dst = -src (fnegs).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format names the instruction actually emitted (fnegs, not the
  // non-existent "fneg"), consistent with negD_reg_reg's "fnegd".
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12653 
// Double-precision FP negate: dst = -src (fnegd).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision FP absolute value: dst = |src| (fabss).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP absolute value: dst = |src| (fabsd).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12693 
// Double-precision FP square root: dst = sqrt(src) (fsqrtd).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Double sqrt shares the double-precision divide/sqrt unit; the
  // previous fp_div_s was the single-precision pipe (it was swapped
  // with sqrtF_reg's).
  ins_pipe(fp_div_d);
%}
12706 
// Single-precision FP square root (fsqrts). The float sqrt is matched
// as ConvD2F(SqrtD(ConvF2D src)) because the ideal graph only has a
// double SqrtD node; the fused single-precision instruction gives the
// same rounded result.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Single sqrt uses the single-precision divide/sqrt pipe; the
  // previous fp_div_d was the double-precision pipe (it was swapped
  // with sqrtD_reg's).
  ins_pipe(fp_div_s);
%}
12719 
12720 // ============================================================================
12721 // Logical Instructions
12722 
12723 // Integer Logical Instructions
12724 
12725 // And Instructions
12726 
12727 
// Integer (32-bit) bitwise AND, register-register: dst = src1 & src2 (andw).
// NOTE(review): rFlagsReg cr appears in the operand list but there is no
// effect() clause naming it — presumably reserved for flag-setting
// variants; confirm against the matcher's expectations.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12742 
// Integer (32-bit) bitwise AND with a logical immediate: dst = src1 & imm.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format corrected from "andsw" to "andw": the encoding emits the
  // plain andw, which does not set flags.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12757 
// Or Instructions

// Integer (32-bit) bitwise OR, register-register: dst = src1 | src2 (orrw).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer (32-bit) bitwise OR with a logical immediate: dst = src1 | imm.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Integer (32-bit) bitwise XOR, register-register: dst = src1 ^ src2 (eorw).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer (32-bit) bitwise XOR with a logical immediate: dst = src1 ^ imm.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12821 
12822 // Long Logical Instructions
12823 // TODO
12824 
// Long (64-bit) bitwise AND, register-register: dst = src1 & src2.
// The "\t# long" annotations below were corrected from "# int": these
// six rules operate on 64-bit values and the disassembly comment was
// misleading.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long (64-bit) bitwise AND with a logical immediate: dst = src1 & imm.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// Long (64-bit) bitwise OR, register-register: dst = src1 | src2 (orr).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long (64-bit) bitwise OR with a logical immediate: dst = src1 | imm.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Long (64-bit) bitwise XOR, register-register: dst = src1 ^ src2 (eor).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long (64-bit) bitwise XOR with a logical immediate: dst = src1 ^ imm.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12918 
// int -> long sign extension via sbfm (sxtw of bits 0..31).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    // sbfm with immr=0, imms=31 sign-extends the low 32 bits
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned int -> long: (long)src & 0xFFFFFFFF matched as a single
// zero-extension via ubfm.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    // ubfm with immr=0, imms=31 zero-extends the low 32 bits
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// long -> int truncation: a 32-bit register move keeps the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
12957 
// int -> boolean: dst = (src != 0) ? 1 : 0, via compare against zr
// then conditional set. Clobbers the flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != NULL) ? 1 : 0, 64-bit compare
// against zr then conditional set. Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12993 
// double -> float narrowing conversion (fcvtd).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// float -> double widening conversion (fcvts).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// float -> int, round toward zero (fcvtzs, 32-bit form).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// float -> long, round toward zero (fcvtzs, 64-bit form).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// int -> float signed conversion (scvtf, 32-bit source).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// long -> float signed conversion (scvtf, 64-bit source).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// double -> int, round toward zero (fcvtzs, 32-bit destination).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// double -> long, round toward zero (fcvtzs, 64-bit destination).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// int -> double signed conversion (scvtf, 32-bit source).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// long -> double signed conversion (scvtf, 64-bit source).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13123 
13124 // stack <-> reg and reg <-> reg shuffles with no conversion
13125 
// Reinterpret float bits on the stack as an int: 32-bit load from the
// source stack slot into a general register (no conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret int bits on the stack as a float: 32-bit FP load from
// the source stack slot into an FP register (no conversion).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret double bits on the stack as a long: 64-bit load from the
// source stack slot into a general register (no conversion).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret long bits on the stack as a double: 64-bit FP load from
// the source stack slot into an FP register (no conversion).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13197 
// Reinterpret a float register as int bits in a stack slot: 32-bit FP
// store of src into the destination stack slot (no conversion).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret an int register as float bits in a stack slot: 32-bit
// integer store of src into the destination stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13233 
// Reinterpret a double register as long bits in a stack slot: 64-bit
// FP store of src into the destination stack slot (no conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format operand order corrected to "$src, $dst": this is a store of
  // src into the dst stack slot, matching the encoding and the sibling
  // MoveF2I_reg_stack / MoveL2D_reg_stack formats.
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13251 
// Reinterpret a long register as double bits in a stack slot: 64-bit
// integer store of src into the destination stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13269 
// Bit-for-bit move of a float register into a general register (fmov,
// 32-bit). No memory traffic despite the pipe class.
// NOTE(review): these four reg-reg fmov rules use pipe_class_memory —
// presumably a conservative placeholder; confirm against the pipeline
// model before relying on scheduling here.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit move of a general register into a float register (fmov,
// 32-bit).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit move of a double register into a general register (fmov,
// 64-bit).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit move of a general register into a double register (fmov,
// 64-bit).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
13341 
13342 // ============================================================================
13343 // clearing of an array
13344 
// Zero an array: count in r11, base address in r10; the shared
// aarch64_enc_clear_array_reg_reg encoding does the work and both
// fixed registers are destroyed (USE_KILL).
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
13357 
13358 // ============================================================================
13359 // Overflow Math Instructions
13360 
// Overflow checks set the flags only; the consumer tests the V flag.
// Add-overflow is detected with cmn (compare-negative), i.e. an adds
// that discards the result.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int add-overflow check against an add/sub-encodable immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long add-overflow check, register-register.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long add-overflow check against an add/sub-encodable immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Subtract-overflow is detected with cmp (a subs that discards the
// result).
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract-overflow check against an add/sub-encodable immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract-overflow check, register-register.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract-overflow check against an add/sub-encodable immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Negation overflow (0 - op1): compare zr against op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negation overflow (0 - op1).
// NOTE(review): the zero operand is declared immI0 although the
// matched node is OverflowSubL (a long) — presumably it should be an
// immL0; confirm whether this rule ever matches as written.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13490 
// Int multiply-overflow producing flags: do the multiply in 64 bits
// (smull) and check that the 64-bit product equals its own 32-bit
// sign-extension; then manufacture the V flag for the generic
// consumer via the 0x80000000 - 1 trick.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form for If(OverflowMulI): when the test is overflow /
// no_overflow we can branch directly on NE/EQ after the sign-extension
// compare, skipping the V-flag manufacture above.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow requested) maps to NE; VC maps to EQ
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply-overflow producing flags: compute low (mul) and high
// (smulh) halves of the 128-bit product; no overflow iff the high half
// is the pure sign-extension of the low half. Then manufacture V as in
// the int case.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form for If(OverflowMulL): branch directly on NE/EQ after the
// high-half comparison, as in overflowMulI_reg_branch.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13580 
13581 // ============================================================================
13582 // Compare Instructions
13583 
// Signed int compare, register vs register; sets flags for a signed cmpOp.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against the constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an immediate that fits an add/sub-style
// immediate field (single-instruction form).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costed at 2x since
// the general encoding may need to materialize the constant first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13639 
13640 // Unsigned compare Instructions; really, same as signed compare
13641 // except it should only be used to feed an If or a CMovI which takes a
13642 // cmpOpU.
13643 
// Unsigned int compare, reg vs reg.  Emits the same cmpw as the signed
// form; the distinction is the rFlagsRegU result, consumed via cmpOpU.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (2x cost: the
// general encoding may need an extra instruction for the constant).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13699 
// Signed long compare, register vs register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
13713 
// Long compare against the constant zero.  The immediate operand must be
// a long zero (immL0): CmpL's second input is a long, so an int zero
// operand (immI0) could never type-match in the ADLC.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
13727 
// Long compare against an add/sub-encodable immediate (single instruction).
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Long compare against an arbitrary immediate; 2x cost because the
// general encoding may have to materialize the constant first.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13755 
// Pointer compare; pointer comparisons are unsigned (rFlagsRegU).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop (narrow pointer) compare, also unsigned.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test (compare against the null constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13811 
13812 // FP comparisons
13813 //
13814 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13815 // using normal cmpOp. See declaration of rFlagsReg for details.
13816 
// Float compare, reg vs reg; result lands in the ordinary integer flags
// (see the CmpF/CmpD note above).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against +0.0 (uses fcmp's immediate-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double compare, reg vs reg.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against +0.0 (uses fcmp's immediate-zero form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
13873 
// Three-way float compare: dst = -1 (src1 < src2 or unordered),
// 0 (equal), +1 (src1 > src2).  The unordered case maps to LT because
// fcmp's unordered flag setting (NZCV=0011) makes LT true.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps  $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13901 
// Three-way double compare: dst = -1 (less or unordered), 0 (equal),
// +1 (greater).  Same csinv/csneg sequence as the float variant.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd  $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13928 
// Three-way float compare against +0.0: dst = -1 (less or unordered),
// 0 (equal), +1 (greater).  Uses fcmp's immediate-zero form.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps  $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13955 
// Three-way double compare against +0.0: dst = -1 (less or unordered),
// 0 (equal), +1 (greater).  Uses fcmp's immediate-zero form.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd  $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13981 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw produces 0/1, then negation
// turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask vs zero: an arithmetic right shift by 31 smears the sign bit,
// yielding -1 for negative src and 0 otherwise in a single instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14018 
14019 // ============================================================================
14020 // Max and Min
14021 
// Signed int min: compare then conditionally select src1 when LT.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int max: compare then conditionally select src1 when GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14072 
14073 // ============================================================================
14074 // Branch Instructions
14075 
14076 // Direct Branch.
// Unconditional direct branch.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14132 
14133 // Make use of CBZ and CBNZ.  These instructions, as well as being
14134 // shorter than (cmp; branch), have the additional benefit of not
14135 // killing the flags.
14136 
// Compare-int-with-zero-and-branch: eq -> cbzw, ne -> cbnzw.  Predicate
// restricts the rule to eq/ne tests, the only ones cbz/cbnz can express.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long variant: eq -> cbz, ne -> cbnz (64-bit register forms).
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check-and-branch via cbz/cbnz on the 64-bit register.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check on a narrow oop before decoding: test the compressed form
// directly with the 32-bit cbzw/cbnzw, skipping the DecodeN.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14212 
14213 // Test bit and Branch
14214 
14215 // Patterns for short (< 32KiB) variants
// Sign test of a long via its sign bit: lt (negative) -> tbnz bit 63,
// ge -> tbz bit 63.  Short variant, limited to the tbz +/-32KiB range.
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT maps to "bit set" (tbnz), GE to "bit clear" (tbz).
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test of an int via bit 31; otherwise identical to the long form.
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of a long: (op1 & power-of-2) ==/!= 0 becomes tbz/tbnz
// on the bit's index.  Predicate requires the mask to be a power of two.
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of an int; same scheme as the long variant.
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14289 
14290 // And far variants
// Far variants of the test-bit branches above: same matching, but emitted
// with the far form (tbr's far=true) for targets out of tbz range.
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far sign test of an int (bit 31).
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of a long (power-of-two mask).
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of an int (power-of-two mask).
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14360 
14361 // Test bits
14362 
// (AndL op1 imm) vs 0 folded into a single tst.  The predicate admits
// only masks encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// (AndI op1 imm) vs 0 folded into tstw, for 32-bit logical immediates.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// (AndL op1 op2) vs 0 with a register mask -> tst reg, reg.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// (AndI op1 op2) vs 0 with a register mask -> tstw reg, reg.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14410 
14411 
14412 // Conditional Far Branch
14413 // Conditional Far Branch Unsigned
14414 // TODO: fixme
14415 
14416 // counted loop end branch near
// Counted loop back-branch (signed condition), same encoding as branchCon.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14449 
14450 // counted loop end branch far
14451 // counted loop end branch far unsigned
14452 // TODO: fixme
14453 
14454 // ============================================================================
14455 // inlined locking and unlocking
14456 
// Inlined monitor enter; flags hold the lock/contend outcome and tmp/tmp2
// are clobbered by the locking stub sequence.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit, mirroring cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14484 
14485 
14486 // ============================================================================
14487 // Safepoint Instructions
14488 
14489 // TODO
14490 // provide a near and far version of this code
14491 
// Safepoint poll: a load from the polling page; the VM protects the page
// to trap threads at a safepoint.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14504 
14505 
14506 // ============================================================================
14507 // Procedure Call/Return Instructions
14508 
14509 // Call Java Static Instruction
14510 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14526 
14527 // TO HERE
14528 
14529 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache based).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14545 
14546 // Call Runtime Instruction
14547 
// Call into the VM runtime (non-Java ABI) via the java_to_runtime shim.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call (no safepoint/stack walk expected in the callee).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

// Leaf runtime call that does not use floating point state.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14596 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  // Only the jump target is encoded; method_oop is pinned to the
  // inline-cache register by its operand class and is consumed by the callee.
  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14613 
// Tail Jump; forward the pending exception to a handler.
// The exception oop is pinned to r0 by its operand class.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14626 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size instruction: it only tells the register allocator that
  // the exception oop materializes in r0.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14644 
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14657 
14658 
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14671 
// Die now.
// Emitted for paths the compiler has proven unreachable; a breakpoint
// instruction stops execution if one is ever taken.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);   // brk #999: software breakpoint with a recognizable immediate
  %}

  ins_pipe(pipe_class_default);
%}
14687 
14688 // ============================================================================
14689 // Partial Subtype Check
14690 //
// Walk the superklass array for an instance of the superklass.  Set a hidden
14692 // internal cache on a hit (cache is checked with exposed code in
14693 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14694 // encoding ALSO sets flags.
14695 
// NOTE(review): the fixed registers (r4/r0/r2/r5) presumably match the
// partial-subtype-check stub's calling convention — confirm against the
// stub generator before changing any operand class.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14710 
// Flags-only variant: matches a compare of the partial-subtype-check
// result against zero, so only cr is a live output; temp and result
// are clobbered as scratch.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same cost as the register-result version above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14725 
// Intrinsic String.compareTo for non-compact (UTF-16) strings.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  // cnt1/cnt2 are rewritten in place by the asrw below, hence USE_KILL.
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt1$$Register, $cnt1$$Register, 1);
    __ asrw($cnt2$$Register, $cnt2$$Register, 1);
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14744 
// Intrinsic String.indexOf with a variable-length needle.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);  // -1: needle length not a compile-time constant
  %}
  ins_pipe(pipe_class_memory);
%}
14763 
// Intrinsic String.indexOf with a constant needle length.
// The immI_le_4 operand restricts this match to needle lengths <= 4.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    // zr in the cnt2 slot signals the constant-length path; the actual
    // length is passed as the icnt2 immediate.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14784 
// Intrinsic String.equals for non-compact (UTF-16) strings.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  // cnt is rewritten in place by the asrw below, hence USE_KILL.
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14802 
// Intrinsic Arrays.equals for byte[] (LL = latin1/latin1 encoding).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ byte_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14817 
// Intrinsic Arrays.equals for char[] (UU = UTF-16/UTF-16 encoding).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14832 
// encode char[] to byte[] in ISO_8859_1
// Clobbers SIMD scratch registers v0..v3 in addition to the fixed GP inputs.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
14851 
14852 // ============================================================================
14853 // This name is KNOWN by the ADLC and cannot be changed.
14854 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14855 // for this guy.
// Zero-size instruction: the thread pointer already lives in the
// dedicated thread register, so no code needs to be emitted.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14870 
14871 // ====================VECTOR INSTRUCTIONS=====================================
14872 
// Vector loads, selected by the in-memory size of the LoadVector node
// (4, 8 or 16 bytes -> S, D or Q register load).

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
14905 
// Vector stores, selected by the in-memory size of the StoreVector node
// (4, 8 or 16 bytes -> S, D or Q register store).

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
14938 
// Replicate a byte into every lane.  The 8B form also serves 4B vectors
// (both fit in a D register).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate forms: only the low 8 bits of the constant are broadcast.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14988 
// Replicate a 16-bit value into every lane.  The 4S form also serves
// 2S vectors (both fit in a D register).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Immediate forms: only the low 16 bits of the constant are broadcast.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15038 
// Replicate a 32-bit value into every lane (register and immediate forms).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15086 
// Replicate a 64-bit value into both lanes of a Q register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15098 
// Replicate zero into a 128-bit vector.  The ideal graph presents this
// as ReplicateI of the constant 0 (2L of zero and 4I of zero are the
// same bit pattern), and the encoding zeroes the register with a
// self-EOR rather than a movi.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  // Print the instruction actually emitted (the old format claimed "movi").
  format %{ "eor  $dst, $dst, $dst\t# vector (2L/4I zero)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15112 
// Replicate a floating-point scalar (already in an FP register) into
// every lane.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15151 
15152 // ====================REDUCTION ARITHMETIC====================================
15153 
// Add-reduce 2 ints: dst = src1 + src2[0] + src2[1], extracting lanes
// into GP registers and adding there.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce 4 ints: a vector addv folds the lanes, then the scalar
// src1 is added in a GP register.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15190 
// Multiply-reduce 2 ints: dst = src1 * src2[0] * src2[1], extracting
// lanes into a GP register and multiplying there.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-reduce 4 ints: the upper half of src2 is folded onto the
// lower half with a vector multiply, then the two remaining lanes and
// src1 are combined with scalar multiplies.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15234 
// Add-reduce floats.  Lanes are combined serially with scalar fadds —
// NOTE(review): presumably to preserve strict FP evaluation order; a
// vector faddp would reassociate.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15286 
// Multiply-reduce 2 floats: dst = src1 * src2[0] * src2[1].
// Lanes are combined serially with scalar fmuls — NOTE(review):
// presumably to preserve strict FP evaluation order.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed debug format: the trailing comment used to read
  // "add reduction4f" for this 2-lane multiply reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15306 
// Multiply-reduce 4 floats: dst = src1 * src2[0] * ... * src2[3].
// Lanes are combined serially with scalar fmuls — NOTE(review):
// presumably to preserve strict FP evaluation order.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed debug format: the trailing comment used to read
  // "add reduction4f" even though this is a multiply reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15338 
// Add-reduce 2 doubles: dst = src1 + src2[0] + src2[1], combined with
// scalar faddd instructions.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15358 
// Multiply-reduce 2 doubles: dst = src1 * src2[0] * src2[1], combined
// with scalar fmuld instructions.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed debug format: the trailing comment used to read
  // "add reduction2d" even though this is a multiply reduction.
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15378 
// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------

// Vector byte add.  The 8B form also serves 4B vectors (both fit in a
// D register).
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15411 
// Vector 16-bit add.  The 4S form also serves 2S vectors.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15440 
// Vector 32-bit and 64-bit integer add.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
15482 
15483 instruct vadd2F(vecD dst, vecD src1, vecD src2)
15484 %{
15485   predicate(n->as_Vector()->length() == 2);
15486   match(Set dst (AddVF src1 src2));
15487   ins_cost(INSN_COST);
15488   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
15489   ins_encode %{
15490     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
15491             as_FloatRegister($src1$$reg),
15492             as_FloatRegister($src2$$reg));
15493   %}
15494   ins_pipe(vdop_fp64);
15495 %}
15496 
15497 instruct vadd4F(vecX dst, vecX src1, vecX src2)
15498 %{
15499   predicate(n->as_Vector()->length() == 4);
15500   match(Set dst (AddVF src1 src2));
15501   ins_cost(INSN_COST);
15502   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
15503   ins_encode %{
15504     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
15505             as_FloatRegister($src1$$reg),
15506             as_FloatRegister($src2$$reg));
15507   %}
15508   ins_pipe(vdop_fp128);
15509 %}
15510 
// Double vector add, 2 lanes (128-bit).
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Guard on the vector length, matching every other 2D rule in this
  // file (vsub2D, vmul2D, vdiv2D); previously this rule had no
  // predicate and so did not restrict which AddVD nodes it matched.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15523 
15524 // --------------------------------- SUB --------------------------------------
15525 
15526 instruct vsub8B(vecD dst, vecD src1, vecD src2)
15527 %{
15528   predicate(n->as_Vector()->length() == 4 ||
15529             n->as_Vector()->length() == 8);
15530   match(Set dst (SubVB src1 src2));
15531   ins_cost(INSN_COST);
15532   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
15533   ins_encode %{
15534     __ subv(as_FloatRegister($dst$$reg), __ T8B,
15535             as_FloatRegister($src1$$reg),
15536             as_FloatRegister($src2$$reg));
15537   %}
15538   ins_pipe(vdop64);
15539 %}
15540 
15541 instruct vsub16B(vecX dst, vecX src1, vecX src2)
15542 %{
15543   predicate(n->as_Vector()->length() == 16);
15544   match(Set dst (SubVB src1 src2));
15545   ins_cost(INSN_COST);
15546   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
15547   ins_encode %{
15548     __ subv(as_FloatRegister($dst$$reg), __ T16B,
15549             as_FloatRegister($src1$$reg),
15550             as_FloatRegister($src2$$reg));
15551   %}
15552   ins_pipe(vdop128);
15553 %}
15554 
15555 instruct vsub4S(vecD dst, vecD src1, vecD src2)
15556 %{
15557   predicate(n->as_Vector()->length() == 2 ||
15558             n->as_Vector()->length() == 4);
15559   match(Set dst (SubVS src1 src2));
15560   ins_cost(INSN_COST);
15561   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
15562   ins_encode %{
15563     __ subv(as_FloatRegister($dst$$reg), __ T4H,
15564             as_FloatRegister($src1$$reg),
15565             as_FloatRegister($src2$$reg));
15566   %}
15567   ins_pipe(vdop64);
15568 %}
15569 
15570 instruct vsub8S(vecX dst, vecX src1, vecX src2)
15571 %{
15572   predicate(n->as_Vector()->length() == 8);
15573   match(Set dst (SubVS src1 src2));
15574   ins_cost(INSN_COST);
15575   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
15576   ins_encode %{
15577     __ subv(as_FloatRegister($dst$$reg), __ T8H,
15578             as_FloatRegister($src1$$reg),
15579             as_FloatRegister($src2$$reg));
15580   %}
15581   ins_pipe(vdop128);
15582 %}
15583 
15584 instruct vsub2I(vecD dst, vecD src1, vecD src2)
15585 %{
15586   predicate(n->as_Vector()->length() == 2);
15587   match(Set dst (SubVI src1 src2));
15588   ins_cost(INSN_COST);
15589   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
15590   ins_encode %{
15591     __ subv(as_FloatRegister($dst$$reg), __ T2S,
15592             as_FloatRegister($src1$$reg),
15593             as_FloatRegister($src2$$reg));
15594   %}
15595   ins_pipe(vdop64);
15596 %}
15597 
15598 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15599 %{
15600   predicate(n->as_Vector()->length() == 4);
15601   match(Set dst (SubVI src1 src2));
15602   ins_cost(INSN_COST);
15603   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
15604   ins_encode %{
15605     __ subv(as_FloatRegister($dst$$reg), __ T4S,
15606             as_FloatRegister($src1$$reg),
15607             as_FloatRegister($src2$$reg));
15608   %}
15609   ins_pipe(vdop128);
15610 %}
15611 
15612 instruct vsub2L(vecX dst, vecX src1, vecX src2)
15613 %{
15614   predicate(n->as_Vector()->length() == 2);
15615   match(Set dst (SubVL src1 src2));
15616   ins_cost(INSN_COST);
15617   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
15618   ins_encode %{
15619     __ subv(as_FloatRegister($dst$$reg), __ T2D,
15620             as_FloatRegister($src1$$reg),
15621             as_FloatRegister($src2$$reg));
15622   %}
15623   ins_pipe(vdop128);
15624 %}
15625 
15626 instruct vsub2F(vecD dst, vecD src1, vecD src2)
15627 %{
15628   predicate(n->as_Vector()->length() == 2);
15629   match(Set dst (SubVF src1 src2));
15630   ins_cost(INSN_COST);
15631   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
15632   ins_encode %{
15633     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
15634             as_FloatRegister($src1$$reg),
15635             as_FloatRegister($src2$$reg));
15636   %}
15637   ins_pipe(vdop_fp64);
15638 %}
15639 
15640 instruct vsub4F(vecX dst, vecX src1, vecX src2)
15641 %{
15642   predicate(n->as_Vector()->length() == 4);
15643   match(Set dst (SubVF src1 src2));
15644   ins_cost(INSN_COST);
15645   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
15646   ins_encode %{
15647     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
15648             as_FloatRegister($src1$$reg),
15649             as_FloatRegister($src2$$reg));
15650   %}
15651   ins_pipe(vdop_fp128);
15652 %}
15653 
15654 instruct vsub2D(vecX dst, vecX src1, vecX src2)
15655 %{
15656   predicate(n->as_Vector()->length() == 2);
15657   match(Set dst (SubVD src1 src2));
15658   ins_cost(INSN_COST);
15659   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
15660   ins_encode %{
15661     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
15662             as_FloatRegister($src1$$reg),
15663             as_FloatRegister($src2$$reg));
15664   %}
15665   ins_pipe(vdop_fp128);
15666 %}
15667 
15668 // --------------------------------- MUL --------------------------------------
15669 
15670 instruct vmul4S(vecD dst, vecD src1, vecD src2)
15671 %{
15672   predicate(n->as_Vector()->length() == 2 ||
15673             n->as_Vector()->length() == 4);
15674   match(Set dst (MulVS src1 src2));
15675   ins_cost(INSN_COST);
15676   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
15677   ins_encode %{
15678     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
15679             as_FloatRegister($src1$$reg),
15680             as_FloatRegister($src2$$reg));
15681   %}
15682   ins_pipe(vmul64);
15683 %}
15684 
15685 instruct vmul8S(vecX dst, vecX src1, vecX src2)
15686 %{
15687   predicate(n->as_Vector()->length() == 8);
15688   match(Set dst (MulVS src1 src2));
15689   ins_cost(INSN_COST);
15690   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
15691   ins_encode %{
15692     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
15693             as_FloatRegister($src1$$reg),
15694             as_FloatRegister($src2$$reg));
15695   %}
15696   ins_pipe(vmul128);
15697 %}
15698 
15699 instruct vmul2I(vecD dst, vecD src1, vecD src2)
15700 %{
15701   predicate(n->as_Vector()->length() == 2);
15702   match(Set dst (MulVI src1 src2));
15703   ins_cost(INSN_COST);
15704   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
15705   ins_encode %{
15706     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
15707             as_FloatRegister($src1$$reg),
15708             as_FloatRegister($src2$$reg));
15709   %}
15710   ins_pipe(vmul64);
15711 %}
15712 
15713 instruct vmul4I(vecX dst, vecX src1, vecX src2)
15714 %{
15715   predicate(n->as_Vector()->length() == 4);
15716   match(Set dst (MulVI src1 src2));
15717   ins_cost(INSN_COST);
15718   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
15719   ins_encode %{
15720     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
15721             as_FloatRegister($src1$$reg),
15722             as_FloatRegister($src2$$reg));
15723   %}
15724   ins_pipe(vmul128);
15725 %}
15726 
15727 instruct vmul2F(vecD dst, vecD src1, vecD src2)
15728 %{
15729   predicate(n->as_Vector()->length() == 2);
15730   match(Set dst (MulVF src1 src2));
15731   ins_cost(INSN_COST);
15732   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
15733   ins_encode %{
15734     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
15735             as_FloatRegister($src1$$reg),
15736             as_FloatRegister($src2$$reg));
15737   %}
15738   ins_pipe(vmuldiv_fp64);
15739 %}
15740 
15741 instruct vmul4F(vecX dst, vecX src1, vecX src2)
15742 %{
15743   predicate(n->as_Vector()->length() == 4);
15744   match(Set dst (MulVF src1 src2));
15745   ins_cost(INSN_COST);
15746   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
15747   ins_encode %{
15748     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
15749             as_FloatRegister($src1$$reg),
15750             as_FloatRegister($src2$$reg));
15751   %}
15752   ins_pipe(vmuldiv_fp128);
15753 %}
15754 
15755 instruct vmul2D(vecX dst, vecX src1, vecX src2)
15756 %{
15757   predicate(n->as_Vector()->length() == 2);
15758   match(Set dst (MulVD src1 src2));
15759   ins_cost(INSN_COST);
15760   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
15761   ins_encode %{
15762     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
15763             as_FloatRegister($src1$$reg),
15764             as_FloatRegister($src2$$reg));
15765   %}
15766   ins_pipe(vmuldiv_fp128);
15767 %}
15768 
15769 // --------------------------------- MLA --------------------------------------
15770 
15771 instruct vmla4S(vecD dst, vecD src1, vecD src2)
15772 %{
15773   predicate(n->as_Vector()->length() == 2 ||
15774             n->as_Vector()->length() == 4);
15775   match(Set dst (AddVS dst (MulVS src1 src2)));
15776   ins_cost(INSN_COST);
15777   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
15778   ins_encode %{
15779     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
15780             as_FloatRegister($src1$$reg),
15781             as_FloatRegister($src2$$reg));
15782   %}
15783   ins_pipe(vmla64);
15784 %}
15785 
15786 instruct vmla8S(vecX dst, vecX src1, vecX src2)
15787 %{
15788   predicate(n->as_Vector()->length() == 8);
15789   match(Set dst (AddVS dst (MulVS src1 src2)));
15790   ins_cost(INSN_COST);
15791   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
15792   ins_encode %{
15793     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
15794             as_FloatRegister($src1$$reg),
15795             as_FloatRegister($src2$$reg));
15796   %}
15797   ins_pipe(vmla128);
15798 %}
15799 
15800 instruct vmla2I(vecD dst, vecD src1, vecD src2)
15801 %{
15802   predicate(n->as_Vector()->length() == 2);
15803   match(Set dst (AddVI dst (MulVI src1 src2)));
15804   ins_cost(INSN_COST);
15805   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
15806   ins_encode %{
15807     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
15808             as_FloatRegister($src1$$reg),
15809             as_FloatRegister($src2$$reg));
15810   %}
15811   ins_pipe(vmla64);
15812 %}
15813 
15814 instruct vmla4I(vecX dst, vecX src1, vecX src2)
15815 %{
15816   predicate(n->as_Vector()->length() == 4);
15817   match(Set dst (AddVI dst (MulVI src1 src2)));
15818   ins_cost(INSN_COST);
15819   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
15820   ins_encode %{
15821     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
15822             as_FloatRegister($src1$$reg),
15823             as_FloatRegister($src2$$reg));
15824   %}
15825   ins_pipe(vmla128);
15826 %}
15827 
15828 // --------------------------------- MLS --------------------------------------
15829 
15830 instruct vmls4S(vecD dst, vecD src1, vecD src2)
15831 %{
15832   predicate(n->as_Vector()->length() == 2 ||
15833             n->as_Vector()->length() == 4);
15834   match(Set dst (SubVS dst (MulVS src1 src2)));
15835   ins_cost(INSN_COST);
15836   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
15837   ins_encode %{
15838     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
15839             as_FloatRegister($src1$$reg),
15840             as_FloatRegister($src2$$reg));
15841   %}
15842   ins_pipe(vmla64);
15843 %}
15844 
15845 instruct vmls8S(vecX dst, vecX src1, vecX src2)
15846 %{
15847   predicate(n->as_Vector()->length() == 8);
15848   match(Set dst (SubVS dst (MulVS src1 src2)));
15849   ins_cost(INSN_COST);
15850   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
15851   ins_encode %{
15852     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
15853             as_FloatRegister($src1$$reg),
15854             as_FloatRegister($src2$$reg));
15855   %}
15856   ins_pipe(vmla128);
15857 %}
15858 
15859 instruct vmls2I(vecD dst, vecD src1, vecD src2)
15860 %{
15861   predicate(n->as_Vector()->length() == 2);
15862   match(Set dst (SubVI dst (MulVI src1 src2)));
15863   ins_cost(INSN_COST);
15864   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
15865   ins_encode %{
15866     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
15867             as_FloatRegister($src1$$reg),
15868             as_FloatRegister($src2$$reg));
15869   %}
15870   ins_pipe(vmla64);
15871 %}
15872 
15873 instruct vmls4I(vecX dst, vecX src1, vecX src2)
15874 %{
15875   predicate(n->as_Vector()->length() == 4);
15876   match(Set dst (SubVI dst (MulVI src1 src2)));
15877   ins_cost(INSN_COST);
15878   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
15879   ins_encode %{
15880     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
15881             as_FloatRegister($src1$$reg),
15882             as_FloatRegister($src2$$reg));
15883   %}
15884   ins_pipe(vmla128);
15885 %}
15886 
15887 // --------------------------------- DIV --------------------------------------
15888 
15889 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
15890 %{
15891   predicate(n->as_Vector()->length() == 2);
15892   match(Set dst (DivVF src1 src2));
15893   ins_cost(INSN_COST);
15894   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
15895   ins_encode %{
15896     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
15897             as_FloatRegister($src1$$reg),
15898             as_FloatRegister($src2$$reg));
15899   %}
15900   ins_pipe(vmuldiv_fp64);
15901 %}
15902 
15903 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
15904 %{
15905   predicate(n->as_Vector()->length() == 4);
15906   match(Set dst (DivVF src1 src2));
15907   ins_cost(INSN_COST);
15908   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
15909   ins_encode %{
15910     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
15911             as_FloatRegister($src1$$reg),
15912             as_FloatRegister($src2$$reg));
15913   %}
15914   ins_pipe(vmuldiv_fp128);
15915 %}
15916 
15917 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
15918 %{
15919   predicate(n->as_Vector()->length() == 2);
15920   match(Set dst (DivVD src1 src2));
15921   ins_cost(INSN_COST);
15922   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
15923   ins_encode %{
15924     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
15925             as_FloatRegister($src1$$reg),
15926             as_FloatRegister($src2$$reg));
15927   %}
15928   ins_pipe(vmuldiv_fp128);
15929 %}
15930 
15931 // --------------------------------- SQRT -------------------------------------
15932 
15933 instruct vsqrt2D(vecX dst, vecX src)
15934 %{
15935   predicate(n->as_Vector()->length() == 2);
15936   match(Set dst (SqrtVD src));
15937   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
15938   ins_encode %{
15939     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
15940              as_FloatRegister($src$$reg));
15941   %}
15942   ins_pipe(vsqrt_fp128);
15943 %}
15944 
15945 // --------------------------------- ABS --------------------------------------
15946 
15947 instruct vabs2F(vecD dst, vecD src)
15948 %{
15949   predicate(n->as_Vector()->length() == 2);
15950   match(Set dst (AbsVF src));
15951   ins_cost(INSN_COST * 3);
15952   format %{ "fabs  $dst,$src\t# vector (2S)" %}
15953   ins_encode %{
15954     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
15955             as_FloatRegister($src$$reg));
15956   %}
15957   ins_pipe(vunop_fp64);
15958 %}
15959 
15960 instruct vabs4F(vecX dst, vecX src)
15961 %{
15962   predicate(n->as_Vector()->length() == 4);
15963   match(Set dst (AbsVF src));
15964   ins_cost(INSN_COST * 3);
15965   format %{ "fabs  $dst,$src\t# vector (4S)" %}
15966   ins_encode %{
15967     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
15968             as_FloatRegister($src$$reg));
15969   %}
15970   ins_pipe(vunop_fp128);
15971 %}
15972 
15973 instruct vabs2D(vecX dst, vecX src)
15974 %{
15975   predicate(n->as_Vector()->length() == 2);
15976   match(Set dst (AbsVD src));
15977   ins_cost(INSN_COST * 3);
15978   format %{ "fabs  $dst,$src\t# vector (2D)" %}
15979   ins_encode %{
15980     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
15981             as_FloatRegister($src$$reg));
15982   %}
15983   ins_pipe(vunop_fp128);
15984 %}
15985 
15986 // --------------------------------- NEG --------------------------------------
15987 
15988 instruct vneg2F(vecD dst, vecD src)
15989 %{
15990   predicate(n->as_Vector()->length() == 2);
15991   match(Set dst (NegVF src));
15992   ins_cost(INSN_COST * 3);
15993   format %{ "fneg  $dst,$src\t# vector (2S)" %}
15994   ins_encode %{
15995     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
15996             as_FloatRegister($src$$reg));
15997   %}
15998   ins_pipe(vunop_fp64);
15999 %}
16000 
16001 instruct vneg4F(vecX dst, vecX src)
16002 %{
16003   predicate(n->as_Vector()->length() == 4);
16004   match(Set dst (NegVF src));
16005   ins_cost(INSN_COST * 3);
16006   format %{ "fneg  $dst,$src\t# vector (4S)" %}
16007   ins_encode %{
16008     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
16009             as_FloatRegister($src$$reg));
16010   %}
16011   ins_pipe(vunop_fp128);
16012 %}
16013 
16014 instruct vneg2D(vecX dst, vecX src)
16015 %{
16016   predicate(n->as_Vector()->length() == 2);
16017   match(Set dst (NegVD src));
16018   ins_cost(INSN_COST * 3);
16019   format %{ "fneg  $dst,$src\t# vector (2D)" %}
16020   ins_encode %{
16021     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
16022             as_FloatRegister($src$$reg));
16023   %}
16024   ins_pipe(vunop_fp128);
16025 %}
16026 
16027 // --------------------------------- AND --------------------------------------
16028 
16029 instruct vand8B(vecD dst, vecD src1, vecD src2)
16030 %{
16031   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16032             n->as_Vector()->length_in_bytes() == 8);
16033   match(Set dst (AndV src1 src2));
16034   ins_cost(INSN_COST);
16035   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
16036   ins_encode %{
16037     __ andr(as_FloatRegister($dst$$reg), __ T8B,
16038             as_FloatRegister($src1$$reg),
16039             as_FloatRegister($src2$$reg));
16040   %}
16041   ins_pipe(vlogical64);
16042 %}
16043 
16044 instruct vand16B(vecX dst, vecX src1, vecX src2)
16045 %{
16046   predicate(n->as_Vector()->length_in_bytes() == 16);
16047   match(Set dst (AndV src1 src2));
16048   ins_cost(INSN_COST);
16049   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
16050   ins_encode %{
16051     __ andr(as_FloatRegister($dst$$reg), __ T16B,
16052             as_FloatRegister($src1$$reg),
16053             as_FloatRegister($src2$$reg));
16054   %}
16055   ins_pipe(vlogical128);
16056 %}
16057 
16058 // --------------------------------- OR ---------------------------------------
16059 
// Bitwise OR, 4- or 8-byte vectors (64-bit D register).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fixed copy-paste defect: the format string previously printed "and"
  // even though this rule emits an orr instruction (cf. vor16B).
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16074 
// Bitwise OR, 16-byte vectors (128-bit Q register).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16088 
16089 // --------------------------------- XOR --------------------------------------
16090 
16091 instruct vxor8B(vecD dst, vecD src1, vecD src2)
16092 %{
16093   predicate(n->as_Vector()->length_in_bytes() == 4 ||
16094             n->as_Vector()->length_in_bytes() == 8);
16095   match(Set dst (XorV src1 src2));
16096   ins_cost(INSN_COST);
16097   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
16098   ins_encode %{
16099     __ eor(as_FloatRegister($dst$$reg), __ T8B,
16100             as_FloatRegister($src1$$reg),
16101             as_FloatRegister($src2$$reg));
16102   %}
16103   ins_pipe(vlogical64);
16104 %}
16105 
16106 instruct vxor16B(vecX dst, vecX src1, vecX src2)
16107 %{
16108   predicate(n->as_Vector()->length_in_bytes() == 16);
16109   match(Set dst (XorV src1 src2));
16110   ins_cost(INSN_COST);
16111   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
16112   ins_encode %{
16113     __ eor(as_FloatRegister($dst$$reg), __ T16B,
16114             as_FloatRegister($src1$$reg),
16115             as_FloatRegister($src2$$reg));
16116   %}
16117   ins_pipe(vlogical128);
16118 %}
16119 
16120 // ------------------------------ Shift ---------------------------------------
16121 
16122 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
16123   match(Set dst (LShiftCntV cnt));
16124   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
16125   ins_encode %{
16126     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16127   %}
16128   ins_pipe(vdup_reg_reg128);
16129 %}
16130 
16131 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
16132 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
16133   match(Set dst (RShiftCntV cnt));
16134   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
16135   ins_encode %{
16136     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
16137     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
16138   %}
16139   ins_pipe(vdup_reg_reg128);
16140 %}
16141 
// Byte vector shift by register count, 4 or 8 lanes. One rule covers
// both LShiftVB and RShiftVB: sshl shifts left for positive lane counts
// and right for negative ones, and vshiftcntR negates the count.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Byte vector shift by register count, 16 lanes (left or arithmetic right).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Byte vector logical right shift by register count, 4 or 8 lanes;
// ushl with the negated count performs the unsigned right shift.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Byte vector logical right shift by register count, 16 lanes.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16197 
// Byte vector left shift by immediate, 4 or 8 lanes. Java masks int
// shift counts with 31, hence the `& 31`; a masked count >= 8 shifts
// out every bit of a byte lane, which is implemented as dst = src ^ src
// (i.e. zero) because shl cannot encode a byte shift that large.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Byte vector left shift by immediate, 16 lanes (same zeroing trick
// for counts >= 8).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// Byte vector arithmetic right shift by immediate, 4 or 8 lanes.
// Counts >= 8 saturate to 7 (all lanes become the sign).
// NOTE(review): the `-sh & 7` negate-and-mask is the raw immediate
// encoding expected by this assembler's sshr — confirm against
// Assembler::sshr before changing.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Byte vector arithmetic right shift by immediate, 16 lanes (same
// saturation and encoding scheme as vsra8B_imm).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}

// Byte vector logical right shift by immediate, 4 or 8 lanes; counts
// >= 8 zero the lanes via eor, otherwise ushr with the encoded count.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Byte vector logical right shift by immediate, 16 lanes.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16306 
// Short vector shift by register count, 2 or 4 lanes; covers both
// left and arithmetic right shift (right shifts carry a negated count,
// see vshiftcntR).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Short vector shift by register count, 8 lanes (left or arithmetic right).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Short vector logical right shift by register count, 2 or 4 lanes.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Short vector logical right shift by register count, 8 lanes.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16362 
// Shift left of 2 or 4 shorts by an immediate count.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Java shift semantics mask an int shift count to 5 bits ...
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // ... so counts of 16..31 shift every bit out of a 16-bit lane:
      // the result is all-zero, produced here by eor'ing $src with itself.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16382 
// 8-short (128-bit) variant of vsll4S_imm: immediate shift left, with
// the same zeroing idiom (eor src,src) for counts >= 16.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int-shift mask
    if (sh >= 16) {
      // Shifting a 16-bit lane left by >= 16 yields zero.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16401 
// Arithmetic right shift of 2 or 4 shorts by an immediate count.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int-shift mask
    // An arithmetic shift by >= 16 of a 16-bit lane saturates to a shift
    // by 15 (all lanes become their sign fill).
    if (sh >= 16) sh = 15;
    // NOTE(review): the negate-and-mask matches this Assembler's sshr
    // immediate interface, which appears to expect the count in negated
    // modular form -- confirm against Assembler::sshr.
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16417 
// 8-short (128-bit) variant of vsra4S_imm: immediate arithmetic right
// shift, saturated to 15 and passed in the Assembler's negated form.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int-shift mask
    if (sh >= 16) sh = 15;                // arithmetic shift saturates
    sh = -sh & 15;                        // Assembler's negated encoding
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16432 
// Logical right shift of 2 or 4 shorts by an immediate count.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int-shift mask
    if (sh >= 16) {
      // A logical right shift by >= 16 empties a 16-bit lane: zero via
      // eor'ing $src with itself.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // NOTE(review): '-sh & 15' matches this Assembler's ushr immediate
      // interface (negated modular count) -- confirm against
      // Assembler::ushr.
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16452 
// 8-short (128-bit) variant of vsrl4S_imm: immediate logical right shift,
// zeroing for counts >= 16 and the Assembler's negated count otherwise.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // Java int-shift mask
    if (sh >= 16) {
      // Shift by >= 16 empties a 16-bit lane -> zero the register.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16471 
// Variable vector shift of 2 ints: each 32-bit lane of $src is shifted by
// the count in the corresponding lane of $shift.  SSHL shifts left for
// positive per-lane counts and arithmetic-right for negative ones, so one
// rule matches both LShiftVI and RShiftVI.
// NOTE(review): assumes the shift vector is negated upstream for the
// RShiftVI form -- confirm against the shift-count generation rules.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  // Register-operand shift: use the variable-shift pipeline class
  // (was vshift64_imm, inconsistent with vsll4S/vsrl4S et al.).
  ins_pipe(vshift64);
%}
16485 
// 4-int (128-bit) variant of vsll2I: per-lane variable shift via SSHL,
// covering both LShiftVI and RShiftVI.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  // Register-operand shift: use the variable-shift pipeline class
  // (was vshift128_imm, inconsistent with vsll8S/vsll2L).
  ins_pipe(vshift128);
%}
16499 
// Variable logical right shift of 2 ints.  USHL shifts left for positive
// per-lane counts and logical-right for negative ones.
// NOTE(review): assumes the shift vector is negated upstream so that
// URShiftVI maps to negative counts -- confirm against the matcher.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  // Register-operand shift: use the variable-shift pipeline class
  // (was vshift64_imm, inconsistent with vsrl4S/vsrl2L).
  ins_pipe(vshift64);
%}
16512 
// 4-int (128-bit) variant of vsrl2I: variable logical right shift via
// USHL with (presumed) negated per-lane counts.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  // Register-operand shift: use the variable-shift pipeline class
  // (was vshift128_imm, inconsistent with vsrl8S/vsrl2L).
  ins_pipe(vshift128);
%}
16525 
// Shift left of 2 ints by an immediate count.  The & 31 mask mirrors Java
// int-shift semantics and keeps the count in SHL's valid 0..31 range, so
// no zeroing special case is needed (unlike the 16-bit variants).
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16538 
// 4-int (128-bit) variant of vsll2I_imm: immediate shift left, count
// masked to 0..31 per Java int-shift semantics.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16551 
// Arithmetic right shift of 2 ints by an immediate count.
// NOTE(review): the '-count & 31' form matches this Assembler's sshr
// immediate interface (negated modular count) -- confirm against
// Assembler::sshr.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16564 
// 4-int (128-bit) variant of vsra2I_imm: immediate arithmetic right
// shift, count passed in the Assembler's negated form.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16577 
// Logical right shift of 2 ints by an immediate count, passed in the
// Assembler's negated modular form (see note on vsra2I_imm).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16590 
// 4-int (128-bit) variant of vsrl2I_imm: immediate logical right shift,
// count passed in the Assembler's negated form.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16603 
// Variable vector shift of 2 longs: each 64-bit lane of $src is shifted
// by the count in the corresponding lane of $shift.  SSHL shifts left for
// positive per-lane counts and arithmetic-right for negative ones, so one
// rule matches both LShiftVL and RShiftVL.
// NOTE(review): assumes the shift vector is negated upstream for the
// RShiftVL form -- confirm against the shift-count generation rules.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16617 
// Variable logical right shift of 2 longs via USHL with (presumed)
// negated per-lane counts.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16630 
// Shift left of 2 longs by an immediate count.  The & 63 mask mirrors
// Java long-shift semantics and keeps the count in SHL's valid 0..63
// range, so no zeroing special case is needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  // Immediate shift: use the immediate-shift pipeline class (was
  // vshift128, inconsistent with vsra2L_imm/vsrl2L_imm and with every
  // other *_imm shift rule).
  ins_pipe(vshift128_imm);
%}
16643 
// Arithmetic right shift of 2 longs by an immediate count, passed in the
// Assembler's negated modular form (see note on vsra2I_imm).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16656 
// Logical right shift of 2 longs by an immediate count, passed in the
// Assembler's negated modular form.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16669 
16670 //----------PEEPHOLE RULES-----------------------------------------------------
16671 // These must follow all instruction definitions as they use the names
16672 // defined in the instructions definitions.
16673 //
16674 // peepmatch ( root_instr_name [preceding_instruction]* );
16675 //
16676 // peepconstraint %{
16677 // (instruction_number.operand_name relational_op instruction_number.operand_name
16678 //  [, ...] );
16679 // // instruction numbers are zero-based using left to right order in peepmatch
16680 //
16681 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16682 // // provide an instruction_number.operand_name for each operand that appears
16683 // // in the replacement instruction's match rule
16684 //
16685 // ---------VM FLAGS---------------------------------------------------------
16686 //
16687 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16688 //
16689 // Each peephole rule is given an identifying number starting with zero and
16690 // increasing by one in the order seen by the parser.  An individual peephole
16691 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16692 // on the command-line.
16693 //
16694 // ---------CURRENT LIMITATIONS----------------------------------------------
16695 //
16696 // Only match adjacent instructions in same basic block
16697 // Only equality constraints
16698 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16699 // Only one replacement instruction
16700 //
16701 // ---------EXAMPLE----------------------------------------------------------
16702 //
16703 // // pertinent parts of existing instructions in architecture description
16704 // instruct movI(iRegINoSp dst, iRegI src)
16705 // %{
16706 //   match(Set dst (CopyI src));
16707 // %}
16708 //
16709 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16710 // %{
16711 //   match(Set dst (AddI dst src));
16712 //   effect(KILL cr);
16713 // %}
16714 //
16715 // // Change (inc mov) to lea
16716 // peephole %{
//   // increment preceded by register-register move
16718 //   peepmatch ( incI_iReg movI );
16719 //   // require that the destination register of the increment
16720 //   // match the destination register of the move
16721 //   peepconstraint ( 0.dst == 1.dst );
16722 //   // construct a replacement instruction that sets
16723 //   // the destination to ( move's source register + one )
16724 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16725 // %}
16726 //
16727 
16728 // Implementation no longer uses movX instructions since
16729 // machine-independent system no longer uses CopyX nodes.
16730 //
16731 // peephole
16732 // %{
16733 //   peepmatch (incI_iReg movI);
16734 //   peepconstraint (0.dst == 1.dst);
16735 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16736 // %}
16737 
16738 // peephole
16739 // %{
16740 //   peepmatch (decI_iReg movI);
16741 //   peepconstraint (0.dst == 1.dst);
16742 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16743 // %}
16744 
16745 // peephole
16746 // %{
16747 //   peepmatch (addI_iReg_imm movI);
16748 //   peepconstraint (0.dst == 1.dst);
16749 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16750 // %}
16751 
16752 // peephole
16753 // %{
16754 //   peepmatch (incL_iReg movL);
16755 //   peepconstraint (0.dst == 1.dst);
16756 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16757 // %}
16758 
16759 // peephole
16760 // %{
16761 //   peepmatch (decL_iReg movL);
16762 //   peepconstraint (0.dst == 1.dst);
16763 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16764 // %}
16765 
16766 // peephole
16767 // %{
16768 //   peepmatch (addL_iReg_imm movL);
16769 //   peepconstraint (0.dst == 1.dst);
16770 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16771 // %}
16772 
16773 // peephole
16774 // %{
16775 //   peepmatch (addP_iReg_imm movP);
16776 //   peepconstraint (0.dst == 1.dst);
16777 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16778 // %}
16779 
16780 // // Change load of spilled value to only a spill
16781 // instruct storeI(memory mem, iRegI src)
16782 // %{
16783 //   match(Set mem (StoreI mem src));
16784 // %}
16785 //
16786 // instruct loadI(iRegINoSp dst, memory mem)
16787 // %{
16788 //   match(Set dst (LoadI mem));
16789 // %}
16790 //
16791 
16792 //----------SMARTSPILL RULES---------------------------------------------------
16793 // These must follow all instruction definitions as they use the names
16794 // defined in the instructions definitions.
16795 
16796 // Local Variables:
16797 // mode: c++
16798 // End: