1 //
   2 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// as regards Java usage; we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// for Java use, float registers v0-v15 are always save on call whereas
// the platform ABI treats v8-v15 as callee save. float registers
// v16-v31 are SOC as per the platform spec
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation order, highest priority first (see the
// priority note above): plain volatiles, then argument registers, then
// callee-saved, with the non-allocatable system registers last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Float register allocation order, highest priority first: the no-save
// registers v16-v31, then the argument registers v0-v7, then v8-v15
// (callee saved under the platform ABI -- see comment above).
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);

// Singleton classes below pin a specific register for instructions that
// require one fixed register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP, R31)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers (heapbase, thread, fp,
// lr and sp excluded)
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// As above but with R29 (fp) allocatable; paired with the _no_fp
// variant via the reg_class_dynamic below.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Chooses between the two classes above based on PreserveFramePointer.
// NOTE(review): presumably the fp-excluding class is selected when
// PreserveFramePointer is true -- confirm against ADLC reg_class_dynamic
// semantics.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers (heapbase, thread,
// fp, lr and sp excluded)
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// As above but with R29 (fp) allocatable; paired with the _no_fp
// variant via the reg_class_dynamic below.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Chooses between the two classes above based on PreserveFramePointer
// (see the note on no_special_reg32 above for the selection caveat).
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64-bit classes: each pins one register (low half plus
// virtual high half) for instructions or calling conventions that
// require that specific register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (includes the special registers
// heapbase, thread, fp, lr and sp)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers (heapbase, thread, fp,
// lr and sp excluded)
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers (single 32-bit slot per register)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (two 32-bit slots per register)
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers (low two 32-bit slots of each
// V register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (four allocator slots per register: base, _H, _J and _K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slots are listed (cf. vectorx_reg,
// which also tracks _J/_K) -- confirm this matches how v0 is used
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): only V1/V1_H listed, as for v0_reg above
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): only V2/V2_H listed, as for v0_reg above
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): only V3/V3_H listed, as for v0_reg above
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes
// (RFLAGS models the processor condition flags as a single register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls cost twice a register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references (ldar/stlr and associated dmbs) are
  // ranked well above ordinary instructions.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "asm/macroAssembler.hpp"
 999 #include "gc/shared/cardTable.hpp"
1000 #include "gc/shared/cardTableBarrierSet.hpp"
1001 #include "gc/shared/collectedHeap.hpp"
1002 #include "gc/shenandoah/brooksPointer.hpp"
1003 #include "opto/addnode.hpp"
1004 
// Trampoline stub sizing queries used by Compile::shorten_branches.
// AArch64 emits no call trampoline stubs, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1022 
// Sizing and emission of the exception and deopt handler stubs.
class HandlerImpl {

 public:

  // emitters for the handler stubs (definitions are elsewhere in this
  // file's generated code)
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // (4 words total: presumably 1 for the adr plus up to 3 for the
    // far branch -- confirm against MacroAssembler::far_branch_size)
    return 4 * NativeInstruction::instruction_size;
  }
};
1039 
  // graph traversal helpers used by the volatile load/store and CAS
  // matching predicates (detailed commentary with the definitions)

  // return the parent membar of n, linked via intervening Ctl and Mem
  // Proj nodes, or NULL; n must be a Load or a MemBar
  MemBarNode *parent_membar(const Node *n);
  // return the child membar of n, linked via intervening Ctl and Mem
  // Proj nodes, or NULL
  MemBarNode *child_membar(const MemBarNode *n);
  // true iff barrier is a MemBarRelease, or a MemBarCPUOrder whose
  // Ctl and Mem feeds come from a MemBarRelease
  bool leading_membar(const MemBarNode *barrier);

  // true iff barrier is the MemBarVolatile of a GC card mark sequence
  bool is_card_mark_membar(const MemBarNode *barrier);
  // true iff opcode belongs to the CompareAndSwap family of ideal
  // nodes -- see the definition for the exact membership
  bool is_CAS(int opcode);

  // helpers which navigate between the leading, card mark and
  // trailing membars of a volatile put or CAS subgraph, returning
  // NULL when the expected graph shape is not found
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1074 %}
1075 
1076 source %{
1077 
  // Optimization of volatile gets and puts
1079   // -------------------------------------
1080   //
1081   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1082   // use to implement volatile reads and writes. For a volatile read
1083   // we simply need
1084   //
1085   //   ldar<x>
1086   //
1087   // and for a volatile write we need
1088   //
1089   //   stlr<x>
1090   //
1091   // Alternatively, we can implement them by pairing a normal
1092   // load/store with a memory barrier. For a volatile read we need
1093   //
1094   //   ldr<x>
1095   //   dmb ishld
1096   //
1097   // for a volatile write
1098   //
1099   //   dmb ish
1100   //   str<x>
1101   //   dmb ish
1102   //
1103   // We can also use ldaxr and stlxr to implement compare and swap CAS
1104   // sequences. These are normally translated to an instruction
1105   // sequence like the following
1106   //
1107   //   dmb      ish
1108   // retry:
1109   //   ldxr<x>   rval raddr
1110   //   cmp       rval rold
1111   //   b.ne done
1112   //   stlxr<x>  rval, rnew, rold
1113   //   cbnz      rval retry
1114   // done:
1115   //   cset      r0, eq
1116   //   dmb ishld
1117   //
1118   // Note that the exclusive store is already using an stlxr
1119   // instruction. That is required to ensure visibility to other
1120   // threads of the exclusive write (assuming it succeeds) before that
1121   // of any subsequent writes.
1122   //
1123   // The following instruction sequence is an improvement on the above
1124   //
1125   // retry:
1126   //   ldaxr<x>  rval raddr
1127   //   cmp       rval rold
1128   //   b.ne done
1129   //   stlxr<x>  rval, rnew, rold
1130   //   cbnz      rval retry
1131   // done:
1132   //   cset      r0, eq
1133   //
1134   // We don't need the leading dmb ish since the stlxr guarantees
1135   // visibility of prior writes in the case that the swap is
1136   // successful. Crucially we don't have to worry about the case where
1137   // the swap is not successful since no valid program should be
1138   // relying on visibility of prior changes by the attempting thread
1139   // in the case where the CAS fails.
1140   //
1141   // Similarly, we don't need the trailing dmb ishld if we substitute
1142   // an ldaxr instruction since that will provide all the guarantees we
1143   // require regarding observation of changes made by other threads
1144   // before any change to the CAS address observed by the load.
1145   //
1146   // In order to generate the desired instruction sequence we need to
1147   // be able to identify specific 'signature' ideal graph node
1148   // sequences which i) occur as a translation of a volatile reads or
1149   // writes or CAS operations and ii) do not occur through any other
1150   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1152   // sequences to the desired machine code sequences. Selection of the
1153   // alternative rules can be implemented by predicates which identify
1154   // the relevant node sequences.
1155   //
1156   // The ideal graph generator translates a volatile read to the node
1157   // sequence
1158   //
1159   //   LoadX[mo_acquire]
1160   //   MemBarAcquire
1161   //
1162   // As a special case when using the compressed oops optimization we
1163   // may also see this variant
1164   //
1165   //   LoadN[mo_acquire]
1166   //   DecodeN
1167   //   MemBarAcquire
1168   //
1169   // A volatile write is translated to the node sequence
1170   //
1171   //   MemBarRelease
1172   //   StoreX[mo_release] {CardMark}-optional
1173   //   MemBarVolatile
1174   //
1175   // n.b. the above node patterns are generated with a strict
1176   // 'signature' configuration of input and output dependencies (see
1177   // the predicates below for exact details). The card mark may be as
1178   // simple as a few extra nodes or, in a few GC configurations, may
1179   // include more complex control flow between the leading and
1180   // trailing memory barriers. However, whatever the card mark
1181   // configuration these signatures are unique to translated volatile
1182   // reads/stores -- they will not appear as a result of any other
1183   // bytecode translation or inlining nor as a consequence of
1184   // optimizing transforms.
1185   //
1186   // We also want to catch inlined unsafe volatile gets and puts and
1187   // be able to implement them using either ldar<x>/stlr<x> or some
1188   // combination of ldr<x>/stlr<x> and dmb instructions.
1189   //
1190   // Inlined unsafe volatiles puts manifest as a minor variant of the
1191   // normal volatile put node sequence containing an extra cpuorder
1192   // membar
1193   //
1194   //   MemBarRelease
1195   //   MemBarCPUOrder
1196   //   StoreX[mo_release] {CardMark}-optional
1197   //   MemBarCPUOrder
1198   //   MemBarVolatile
1199   //
1200   // n.b. as an aside, a cpuorder membar is not itself subject to
1201   // matching and translation by adlc rules.  However, the rule
1202   // predicates need to detect its presence in order to correctly
1203   // select the desired adlc rules.
1204   //
1205   // Inlined unsafe volatile gets manifest as a slightly different
1206   // node sequence to a normal volatile get because of the
1207   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
  // MemBarAcquire, possibly through an optional DecodeN, is still
1210   // present
1211   //
1212   //   MemBarCPUOrder
1213   //        ||       \\
1214   //   MemBarCPUOrder LoadX[mo_acquire]
1215   //        ||            |
1216   //        ||       {DecodeN} optional
1217   //        ||       /
1218   //     MemBarAcquire
1219   //
1220   // In this case the acquire membar does not directly depend on the
1221   // load. However, we can be sure that the load is generated from an
1222   // inlined unsafe volatile get if we see it dependent on this unique
1223   // sequence of membar nodes. Similarly, given an acquire membar we
1224   // can know that it was added because of an inlined unsafe volatile
1225   // get if it is fed and feeds a cpuorder membar and if its feed
1226   // membar also feeds an acquiring load.
1227   //
1228   // Finally an inlined (Unsafe) CAS operation is translated to the
1229   // following ideal graph
1230   //
1231   //   MemBarRelease
1232   //   MemBarCPUOrder
1233   //   CompareAndSwapX {CardMark}-optional
1234   //   MemBarCPUOrder
1235   //   MemBarAcquire
1236   //
1237   // So, where we can identify these volatile read and write
1238   // signatures we can choose to plant either of the above two code
1239   // sequences. For a volatile read we can simply plant a normal
1240   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1241   // also choose to inhibit translation of the MemBarAcquire and
1242   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1243   //
1244   // When we recognise a volatile store signature we can choose to
1245   // plant at a dmb ish as a translation for the MemBarRelease, a
1246   // normal str<x> and then a dmb ish for the MemBarVolatile.
1247   // Alternatively, we can inhibit translation of the MemBarRelease
1248   // and MemBarVolatile and instead plant a simple stlr<x>
1249   // instruction.
1250   //
1251   // when we recognise a CAS signature we can choose to plant a dmb
1252   // ish as a translation for the MemBarRelease, the conventional
1253   // macro-instruction sequence for the CompareAndSwap node (which
1254   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1255   // Alternatively, we can elide generation of the dmb instructions
1256   // and plant the alternative CompareAndSwap macro-instruction
1257   // sequence (which uses ldaxr<x>).
1258   //
1259   // Of course, the above only applies when we see these signature
1260   // configurations. We still want to plant dmb instructions in any
1261   // other cases where we may see a MemBarAcquire, MemBarRelease or
1262   // MemBarVolatile. For example, at the end of a constructor which
1263   // writes final/volatile fields we will see a MemBarRelease
1264   // instruction and this needs a 'dmb ish' lest we risk the
1265   // constructed object being visible without making the
1266   // final/volatile field writes visible.
1267   //
1268   // n.b. the translation rules below which rely on detection of the
1269   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1270   // If we see anything other than the signature configurations we
1271   // always just translate the loads and stores to ldr<x> and str<x>
1272   // and translate acquire, release and volatile membars to the
1273   // relevant dmb instructions.
1274   //
1275 
1276   // graph traversal helpers used for volatile put/get and CAS
1277   // optimization
1278 
1279   // 1) general purpose helpers
1280 
1281   // if node n is linked to a parent MemBarNode by an intervening
1282   // Control and Memory ProjNode return the MemBarNode otherwise return
1283   // NULL.
1284   //
1285   // n may only be a Load or a MemBar.
1286 
1287   MemBarNode *parent_membar(const Node *n)
1288   {
1289     Node *ctl = NULL;
1290     Node *mem = NULL;
1291     Node *membar = NULL;
1292 
1293     if (n->is_Load()) {
1294       ctl = n->lookup(LoadNode::Control);
1295       mem = n->lookup(LoadNode::Memory);
1296     } else if (n->is_MemBar()) {
1297       ctl = n->lookup(TypeFunc::Control);
1298       mem = n->lookup(TypeFunc::Memory);
1299     } else {
1300         return NULL;
1301     }
1302 
1303     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1304       return NULL;
1305     }
1306 
1307     membar = ctl->lookup(0);
1308 
1309     if (!membar || !membar->is_MemBar()) {
1310       return NULL;
1311     }
1312 
1313     if (mem->lookup(0) != membar) {
1314       return NULL;
1315     }
1316 
1317     return membar->as_MemBar();
1318   }
1319 
1320   // if n is linked to a child MemBarNode by intervening Control and
1321   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1322 
1323   MemBarNode *child_membar(const MemBarNode *n)
1324   {
1325     ProjNode *ctl = n->proj_out_or_null(TypeFunc::Control);
1326     ProjNode *mem = n->proj_out_or_null(TypeFunc::Memory);
1327 
1328     // MemBar needs to have both a Ctl and Mem projection
1329     if (! ctl || ! mem)
1330       return NULL;
1331 
1332     MemBarNode *child = NULL;
1333     Node *x;
1334 
1335     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1336       x = ctl->fast_out(i);
1337       // if we see a membar we keep hold of it. we may also see a new
1338       // arena copy of the original but it will appear later
1339       if (x->is_MemBar()) {
1340           child = x->as_MemBar();
1341           break;
1342       }
1343     }
1344 
1345     if (child == NULL) {
1346       return NULL;
1347     }
1348 
1349     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1350       x = mem->fast_out(i);
1351       // if we see a membar we keep hold of it. we may also see a new
1352       // arena copy of the original but it will appear later
1353       if (x == child) {
1354         return child;
1355       }
1356     }
1357     return NULL;
1358   }
1359 
1360   // helper predicate use to filter candidates for a leading memory
1361   // barrier
1362   //
1363   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1364   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1365 
1366   bool leading_membar(const MemBarNode *barrier)
1367   {
1368     int opcode = barrier->Opcode();
1369     // if this is a release membar we are ok
1370     if (opcode == Op_MemBarRelease) {
1371       return true;
1372     }
1373     // if its a cpuorder membar . . .
1374     if (opcode != Op_MemBarCPUOrder) {
1375       return false;
1376     }
1377     // then the parent has to be a release membar
1378     MemBarNode *parent = parent_membar(barrier);
1379     if (!parent) {
1380       return false;
1381     }
1382     opcode = parent->Opcode();
1383     return opcode == Op_MemBarRelease;
1384   }
1385 
1386   // 2) card mark detection helper
1387 
1388   // helper predicate which can be used to detect a volatile membar
1389   // introduced as part of a conditional card mark sequence either by
1390   // G1 or by CMS when UseCondCardMark is true.
1391   //
1392   // membar can be definitively determined to be part of a card mark
1393   // sequence if and only if all the following hold
1394   //
1395   // i) it is a MemBarVolatile
1396   //
1397   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1398   // true
1399   //
1400   // iii) the node's Mem projection feeds a StoreCM node.
1401 
1402   bool is_card_mark_membar(const MemBarNode *barrier)
1403   {
1404     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1405       return false;
1406     }
1407 
1408     if (barrier->Opcode() != Op_MemBarVolatile) {
1409       return false;
1410     }
1411 
1412     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1413 
1414     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1415       Node *y = mem->fast_out(i);
1416       if (y->Opcode() == Op_StoreCM) {
1417         return true;
1418       }
1419     }
1420 
1421     return false;
1422   }
1423 
1424 
1425   // 3) helper predicates to traverse volatile put or CAS graphs which
1426   // may contain GC barrier subgraphs
1427 
1428   // Preamble
1429   // --------
1430   //
1431   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1433   // leading MemBarRelease and a trailing MemBarVolatile as follows
1434   //
1435   //   MemBarRelease
1436   //  {      ||      } -- optional
1437   //  {MemBarCPUOrder}
1438   //         ||     \\
1439   //         ||     StoreX[mo_release]
1440   //         | \     /
1441   //         | MergeMem
1442   //         | /
1443   //  {MemBarCPUOrder} -- optional
1444   //  {      ||      }
1445   //   MemBarVolatile
1446   //
1447   // where
1448   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1449   //  | \ and / indicate further routing of the Ctl and Mem feeds
1450   //
1451   // this is the graph we see for non-object stores. however, for a
1452   // volatile Object store (StoreN/P) we may see other nodes below the
1453   // leading membar because of the need for a GC pre- or post-write
1454   // barrier.
1455   //
  // with most GC configurations we will see this simple variant which
1457   // includes a post-write barrier card mark.
1458   //
1459   //   MemBarRelease______________________________
1460   //         ||    \\               Ctl \        \\
1461   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1462   //         | \     /                       . . .  /
1463   //         | MergeMem
1464   //         | /
1465   //         ||      /
1466   //  {MemBarCPUOrder} -- optional
1467   //  {      ||      }
1468   //   MemBarVolatile
1469   //
1470   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1471   // the object address to an int used to compute the card offset) and
1472   // Ctl+Mem to a StoreB node (which does the actual card mark).
1473   //
1474   // n.b. a StoreCM node will only appear in this configuration when
1475   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1476   // because it implies a requirement to order visibility of the card
1477   // mark (StoreCM) relative to the object put (StoreP/N) using a
1478   // StoreStore memory barrier (arguably this ought to be represented
1479   // explicitly in the ideal graph but that is not how it works). This
1480   // ordering is required for both non-volatile and volatile
1481   // puts. Normally that means we need to translate a StoreCM using
1482   // the sequence
1483   //
1484   //   dmb ishst
1485   //   stlrb
1486   //
1487   // However, in the case of a volatile put if we can recognise this
1488   // configuration and plant an stlr for the object write then we can
1489   // omit the dmb and just plant an strb since visibility of the stlr
1490   // is ordered before visibility of subsequent stores. StoreCM nodes
1491   // also arise when using G1 or using CMS with conditional card
1492   // marking. In these cases (as we shall see) we don't need to insert
1493   // the dmb when translating StoreCM because there is already an
1494   // intervening StoreLoad barrier between it and the StoreP/N.
1495   //
1496   // It is also possible to perform the card mark conditionally on it
1497   // currently being unmarked in which case the volatile put graph
1498   // will look slightly different
1499   //
1500   //   MemBarRelease____________________________________________
1501   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1502   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1503   //         | \     /                              \            |
1504   //         | MergeMem                            . . .      StoreB
1505   //         | /                                                /
1506   //         ||     /
1507   //   MemBarVolatile
1508   //
1509   // It is worth noting at this stage that both the above
1510   // configurations can be uniquely identified by checking that the
1511   // memory flow includes the following subgraph:
1512   //
1513   //   MemBarRelease
1514   //  {MemBarCPUOrder}
1515   //          |  \      . . .
1516   //          |  StoreX[mo_release]  . . .
1517   //          |   /
1518   //         MergeMem
1519   //          |
1520   //  {MemBarCPUOrder}
1521   //   MemBarVolatile
1522   //
1523   // This is referred to as a *normal* subgraph. It can easily be
1524   // detected starting from any candidate MemBarRelease,
1525   // StoreX[mo_release] or MemBarVolatile.
1526   //
1527   // A simple variation on this normal case occurs for an unsafe CAS
1528   // operation. The basic graph for a non-object CAS is
1529   //
1530   //   MemBarRelease
1531   //         ||
1532   //   MemBarCPUOrder
1533   //         ||     \\   . . .
1534   //         ||     CompareAndSwapX
1535   //         ||       |
1536   //         ||     SCMemProj
1537   //         | \     /
1538   //         | MergeMem
1539   //         | /
1540   //   MemBarCPUOrder
1541   //         ||
1542   //   MemBarAcquire
1543   //
1544   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1547   // tail of the graph is a pair comprising a MemBarCPUOrder +
1548   // MemBarAcquire.
1549   //
1550   // So, in the case of a CAS the normal graph has the variant form
1551   //
1552   //   MemBarRelease
1553   //   MemBarCPUOrder
1554   //          |   \      . . .
1555   //          |  CompareAndSwapX  . . .
1556   //          |    |
1557   //          |   SCMemProj
1558   //          |   /  . . .
1559   //         MergeMem
1560   //          |
1561   //   MemBarCPUOrder
1562   //   MemBarAcquire
1563   //
1564   // This graph can also easily be detected starting from any
1565   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1566   //
1567   // the code below uses two helper predicates, leading_to_normal and
1568   // normal_to_leading to identify these normal graphs, one validating
1569   // the layout starting from the top membar and searching down and
1570   // the other validating the layout starting from the lower membar
1571   // and searching up.
1572   //
1573   // There are two special case GC configurations when a normal graph
1574   // may not be generated: when using G1 (which always employs a
1575   // conditional card mark); and when using CMS with conditional card
1576   // marking configured. These GCs are both concurrent rather than
  // stop-the-world GCs. So they introduce extra Ctl+Mem flow into the
  // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1580   // object put and the corresponding conditional card mark. CMS
1581   // employs a post-write GC barrier while G1 employs both a pre- and
1582   // post-write GC barrier. Of course the extra nodes may be absent --
1583   // they are only inserted for object puts/swaps. This significantly
1584   // complicates the task of identifying whether a MemBarRelease,
1585   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1586   // when using these GC configurations (see below). It adds similar
1587   // complexity to the task of identifying whether a MemBarRelease,
1588   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1589   //
1590   // In both cases the post-write subtree includes an auxiliary
1591   // MemBarVolatile (StoreLoad barrier) separating the object put/swap
1592   // and the read of the corresponding card. This poses two additional
1593   // problems.
1594   //
1595   // Firstly, a card mark MemBarVolatile needs to be distinguished
1596   // from a normal trailing MemBarVolatile. Resolving this first
1597   // problem is straightforward: a card mark MemBarVolatile always
1598   // projects a Mem feed to a StoreCM node and that is a unique marker
1599   //
1600   //      MemBarVolatile (card mark)
1601   //       C |    \     . . .
1602   //         |   StoreCM   . . .
1603   //       . . .
1604   //
1605   // The second problem is how the code generator is to translate the
1606   // card mark barrier? It always needs to be translated to a "dmb
1607   // ish" instruction whether or not it occurs as part of a volatile
1608   // put. A StoreLoad barrier is needed after the object put to ensure
1609   // i) visibility to GC threads of the object put and ii) visibility
1610   // to the mutator thread of any card clearing write by a GC
1611   // thread. Clearly a normal store (str) will not guarantee this
1612   // ordering but neither will a releasing store (stlr). The latter
1613   // guarantees that the object put is visible but does not guarantee
1614   // that writes by other threads have also been observed.
1615   //
1616   // So, returning to the task of translating the object put and the
1617   // leading/trailing membar nodes: what do the non-normal node graph
1618   // look like for these 2 special cases? and how can we determine the
1619   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1620   // in both normal and non-normal cases?
1621   //
1622   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1624   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1625   // intervening StoreLoad barrier (MemBarVolatile).
1626   //
1627   // So, with CMS we may see a node graph for a volatile object store
1628   // which looks like this
1629   //
1630   //   MemBarRelease
1631   //  {MemBarCPUOrder}_(leading)_________________
1632   //     C |    M \       \\                   C \
1633   //       |       \    StoreN/P[mo_release]  CastP2X
1634   //       |    Bot \    /
1635   //       |       MergeMem
1636   //       |         /
1637   //      MemBarVolatile (card mark)
1638   //     C |  ||    M |
1639   //       | LoadB    |
1640   //       |   |      |
1641   //       | Cmp      |\
1642   //       | /        | \
1643   //       If         |  \
1644   //       | \        |   \
1645   // IfFalse  IfTrue  |    \
1646   //       \     / \  |     \
1647   //        \   / StoreCM    |
1648   //         \ /      |      |
1649   //        Region   . . .   |
1650   //          | \           /
1651   //          |  . . .  \  / Bot
1652   //          |       MergeMem
1653   //          |          |
1654   //       {MemBarCPUOrder}
1655   //        MemBarVolatile (trailing)
1656   //
1657   // The first MergeMem merges the AliasIdxBot Mem slice from the
1658   // leading membar and the oopptr Mem slice from the Store into the
1659   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1660   // Mem slice from the card mark membar and the AliasIdxRaw slice
1661   // from the StoreCM into the trailing membar (n.b. the latter
1662   // proceeds via a Phi associated with the If region).
1663   //
1664   // The graph for a CAS varies slightly, the difference being
1665   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1666   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1667   // MemBarAcquire pair (also the MemBarCPUOrder nodes are not optional).
1668   //
1669   //   MemBarRelease
1670   //   MemBarCPUOrder_(leading)_______________
1671   //     C |    M \       \\                C \
1672   //       |       \    CompareAndSwapN/P  CastP2X
1673   //       |        \      |
1674   //       |         \   SCMemProj
1675   //       |      Bot \   /
1676   //       |        MergeMem
1677   //       |         /
1678   //      MemBarVolatile (card mark)
1679   //     C |  ||    M |
1680   //       | LoadB    |
1681   //       |   |      |
1682   //       | Cmp      |\
1683   //       | /        | \
1684   //       If         |  \
1685   //       | \        |   \
1686   // IfFalse  IfTrue  |    \
1687   //       \     / \  |     \
1688   //        \   / StoreCM    |
1689   //         \ /      |      |
1690   //        Region   . . .   |
1691   //          | \           /
1692   //          |  . . .  \  / Bot
1693   //          |       MergeMem
1694   //          |          |
1695   //        MemBarCPUOrder
1696   //        MemBarVolatile (trailing)
1697   //
1698   //
1699   // G1 is quite a lot more complicated. The nodes inserted on behalf
1700   // of G1 may comprise: a pre-write graph which adds the old value to
1701   // the SATB queue; the releasing store itself; and, finally, a
1702   // post-write graph which performs a card mark.
1703   //
1704   // The pre-write graph may be omitted, but only when the put is
1705   // writing to a newly allocated (young gen) object and then only if
1706   // there is a direct memory chain to the Initialize node for the
1707   // object allocation. This will not happen for a volatile put since
1708   // any memory chain passes through the leading membar.
1709   //
1710   // The pre-write graph includes a series of 3 If tests. The outermost
1711   // If tests whether SATB is enabled (no else case). The next If tests
1712   // whether the old value is non-NULL (no else case). The third tests
1713   // whether the SATB queue index is > 0, if so updating the queue. The
1714   // else case for this third If calls out to the runtime to allocate a
1715   // new queue buffer.
1716   //
1717   // So with G1 the pre-write and releasing store subgraph looks like
1718   // this (the nested Ifs are omitted).
1719   //
1720   //  MemBarRelease
1721   // {MemBarCPUOrder}_(leading)___________
1722   //     C |  ||  M \   M \    M \  M \ . . .
1723   //       | LoadB   \  LoadL  LoadN   \
1724   //       | /        \                 \
1725   //       If         |\                 \
1726   //       | \        | \                 \
1727   //  IfFalse  IfTrue |  \                 \
1728   //       |     |    |   \                 |
1729   //       |     If   |   /\                |
1730   //       |     |          \               |
1731   //       |                 \              |
1732   //       |    . . .         \             |
1733   //       | /       | /       |            |
1734   //      Region  Phi[M]       |            |
1735   //       | \       |         |            |
1736   //       |  \_____ | ___     |            |
1737   //     C | C \     |   C \ M |            |
1738   //       | CastP2X | StoreN/P[mo_release] |
1739   //       |         |         |            |
1740   //     C |       M |       M |          M |
1741   //        \        |         |           /
1742   //                  . . .
1743   //          (post write subtree elided)
1744   //                    . . .
1745   //             C \         M /
1746   //                \         /
1747   //             {MemBarCPUOrder}
1748   //              MemBarVolatile (trailing)
1749   //
1750   // n.b. the LoadB in this subgraph is not the card read -- it's a
1751   // read of the SATB queue active flag.
1752   //
1753   // The G1 post-write subtree is also optional, this time when the
1754   // new value being written is either null or can be identified as a
1755   // newly allocated (young gen) object with no intervening control
1756   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
1759   // trailing membar as per the normal subgraph. So, the only special
1760   // case which arises is when the post-write subgraph is generated.
1761   //
1762   // The kernel of the post-write G1 subgraph is the card mark itself
1763   // which includes a card mark memory barrier (MemBarVolatile), a
1764   // card test (LoadB), and a conditional update (If feeding a
1765   // StoreCM). These nodes are surrounded by a series of nested Ifs
1766   // which try to avoid doing the card mark. The top level If skips if
1767   // the object reference does not cross regions (i.e. it tests if
1768   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1769   // need not be recorded. The next If, which skips on a NULL value,
1770   // may be absent (it is not generated if the type of value is >=
1771   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1772   // checking if card_val != young).  n.b. although this test requires
1773   // a pre-read of the card it can safely be done before the StoreLoad
1774   // barrier. However that does not bypass the need to reread the card
1775   // after the barrier. A final, 4th If tests if the card is already
1776   // marked.
1777   //
1778   //                (pre-write subtree elided)
1779   //        . . .                  . . .    . . .  . . .
1780   //        C |                    M |     M |    M |
1781   //       Region                  Phi[M] StoreN    |
1782   //          |                     / \      |      |
1783   //         / \_______            /   \     |      |
1784   //      C / C \      . . .            \    |      |
1785   //       If   CastP2X . . .            |   |      |
1786   //       / \                           |   |      |
1787   //      /   \                          |   |      |
1788   // IfFalse IfTrue                      |   |      |
1789   //   |       |                         |   |     /|
1790   //   |       If                        |   |    / |
1791   //   |      / \                        |   |   /  |
1792   //   |     /   \                        \  |  /   |
1793   //   | IfFalse IfTrue                   MergeMem  |
1794   //   |  . . .    / \                       /      |
1795   //   |          /   \                     /       |
1796   //   |     IfFalse IfTrue                /        |
1797   //   |      . . .    |                  /         |
1798   //   |               If                /          |
1799   //   |               / \              /           |
1800   //   |              /   \            /            |
1801   //   |         IfFalse IfTrue       /             |
1802   //   |           . . .   |         /              |
1803   //   |                    \       /               |
1804   //   |                     \     /                |
1805   //   |             MemBarVolatile__(card mark)    |
1806   //   |                ||   C |  M \  M \          |
1807   //   |               LoadB   If    |    |         |
1808   //   |                      / \    |    |         |
1809   //   |                     . . .   |    |         |
1810   //   |                          \  |    |        /
1811   //   |                        StoreCM   |       /
1812   //   |                          . . .   |      /
1813   //   |                        _________/      /
1814   //   |                       /  _____________/
1815   //   |   . . .       . . .  |  /            /
1816   //   |    |                 | /   _________/
1817   //   |    |               Phi[M] /        /
1818   //   |    |                 |   /        /
1819   //   |    |                 |  /        /
1820   //   |  Region  . . .     Phi[M]  _____/
1821   //   |    /                 |    /
1822   //   |                      |   /
1823   //   | . . .   . . .        |  /
1824   //   | /                    | /
1825   // Region           |  |  Phi[M]
1826   //   |              |  |  / Bot
1827   //    \            MergeMem
1828   //     \            /
1829   //    {MemBarCPUOrder}
1830   //     MemBarVolatile
1831   //
1832   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1833   // from the leading membar and the oopptr Mem slice from the Store
1834   // into the card mark membar i.e. the memory flow to the card mark
1835   // membar still looks like a normal graph.
1836   //
1837   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1838   // Mem slices (from the StoreCM and other card mark queue stores).
1839   // However in this case the AliasIdxBot Mem slice does not come
1840   // direct from the card mark membar. It is merged through a series
1841   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1842   // from the leading membar with the Mem feed from the card mark
1843   // membar. Each Phi corresponds to one of the Ifs which may skip
1844   // around the card mark membar. So when the If implementing the NULL
1845   // value check has been elided the total number of Phis is 2
1846   // otherwise it is 3.
1847   //
1848   // The CAS graph when using G1GC also includes a pre-write subgraph
1849   // and an optional post-write subgraph. The same variations are
1850   // introduced as for CMS with conditional card marking i.e. the
1851   // StoreP/N is swapped for a CompareAndSwapP/N with a following
1852   // SCMemProj, the trailing MemBarVolatile for a MemBarCPUOrder +
1853   // MemBarAcquire pair. There may be an extra If test introduced in
1854   // the CAS case, when the boolean result of the CAS is tested by the
1855   // caller. In that case an extra Region and AliasIdxBot Phi may be
1856   // introduced before the MergeMem
1857   //
1858   // So, the upshot is that in all cases the subgraph will include a
  // *normal* memory subgraph between the leading membar and its child
1860   // membar: either a normal volatile put graph including a releasing
1861   // StoreX and terminating with a trailing volatile membar or card
1862   // mark volatile membar; or a normal CAS graph including a
1863   // CompareAndSwapX + SCMemProj pair and terminating with a card mark
1864   // volatile membar or a trailing cpu order and acquire membar
1865   // pair. If the child membar is not a (volatile) card mark membar
1866   // then it marks the end of the volatile put or CAS subgraph. If the
1867   // child is a card mark membar then the normal subgraph will form
1868   // part of a larger volatile put or CAS subgraph if and only if the
1869   // child feeds an AliasIdxBot Mem feed to a trailing barrier via a
1870   // MergeMem. That feed is either direct (for CMS) or via 2, 3 or 4
1871   // Phi nodes merging the leading barrier memory flow (for G1).
1872   //
1873   // The predicates controlling generation of instructions for store
1874   // and barrier nodes employ a few simple helper functions (described
1875   // below) which identify the presence or absence of all these
1876   // subgraph configurations and provide a means of traversing from
1877   // one node in the subgraph to another.
1878 
1879   // is_CAS(int opcode)
1880   //
1881   // return true if opcode is one of the possible CompareAndSwapX
1882   // values otherwise false.
1883 
1884   bool is_CAS(int opcode)
1885   {
1886     switch(opcode) {
1887       // We handle these
1888     case Op_CompareAndSwapI:
1889     case Op_CompareAndSwapL:
1890     case Op_CompareAndSwapP:
1891     case Op_CompareAndSwapN:
1892  // case Op_CompareAndSwapB:
1893  // case Op_CompareAndSwapS:
1894       return true;
1895       // These are TBD
1896     case Op_WeakCompareAndSwapB:
1897     case Op_WeakCompareAndSwapS:
1898     case Op_WeakCompareAndSwapI:
1899     case Op_WeakCompareAndSwapL:
1900     case Op_WeakCompareAndSwapP:
1901     case Op_WeakCompareAndSwapN:
1902     case Op_CompareAndExchangeB:
1903     case Op_CompareAndExchangeS:
1904     case Op_CompareAndExchangeI:
1905     case Op_CompareAndExchangeL:
1906     case Op_CompareAndExchangeP:
1907     case Op_CompareAndExchangeN:
1908       return false;
1909     default:
1910       return false;
1911     }
1912   }
1913 
1914   // helper to determine the maximum number of Phi nodes we may need to
1915   // traverse when searching from a card mark membar for the merge mem
1916   // feeding a trailing membar or vice versa
1917 
1918   int max_phis()
1919   {
1920     if (UseG1GC) {
1921       return 4;
1922     } else if (UseConcMarkSweepGC && UseCondCardMark) {
1923       return 1;
1924     } else {
1925       return 0;
1926     }
1927   }
1928 
1929   // leading_to_normal
1930   //
1931   // graph traversal helper which detects the normal case Mem feed
1932   // from a release membar (or, optionally, its cpuorder child) to a
1933   // dependent volatile or acquire membar i.e. it ensures that one of
1934   // the following 3 Mem flow subgraphs is present.
1935   //
1936   //   MemBarRelease
1937   //  {MemBarCPUOrder} {leading}
1938   //          |  \      . . .
1939   //          |  StoreN/P[mo_release]  . . .
1940   //          |   /
1941   //         MergeMem
1942   //          |
1943   //  {MemBarCPUOrder}
1944   //   MemBarVolatile {trailing or card mark}
1945   //
1946   //   MemBarRelease
1947   //   MemBarCPUOrder {leading}
1948   //          |  \      . . .
1949   //          |  CompareAndSwapX  . . .
1950   //          |   /
1951   //         MergeMem
1952   //          |
1953   //   MemBarVolatile {card mark}
1954   //
1955   //   MemBarRelease
1956   //   MemBarCPUOrder {leading}
1957   //          |  \      . . .
1958   //          |  CompareAndSwapX  . . .
1959   //          |   /
1960   //         MergeMem
1961   //          |
1962   //   MemBarCPUOrder
1963   //   MemBarAcquire {trailing}
1964   //
1965   // if the correct configuration is present returns the trailing
1966   // or cardmark membar otherwise NULL.
1967   //
1968   // the input membar is expected to be either a cpuorder membar or a
1969   // release membar. in the latter case it should not have a cpu membar
1970   // child.
1971   //
1972   // the returned value may be a card mark or trailing membar
1973   //
1974 
1975   MemBarNode *leading_to_normal(MemBarNode *leading)
1976   {
1977     assert((leading->Opcode() == Op_MemBarRelease ||
1978             leading->Opcode() == Op_MemBarCPUOrder),
1979            "expecting a volatile or cpuroder membar!");
1980 
1981     // check the mem flow
1982     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1983 
1984     if (!mem) {
1985       return NULL;
1986     }
1987 
1988     Node *x = NULL;
1989     StoreNode * st = NULL;
1990     LoadStoreNode *cas = NULL;
1991     MergeMemNode *mm = NULL;
1992     MergeMemNode *mm2 = NULL;
1993 
1994     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1995       x = mem->fast_out(i);
1996       if (x->is_MergeMem()) {
1997         if (UseShenandoahGC) {
1998           // three merge mems is one too many for Shenandoah
1999           if (mm == NULL) {
2000             mm = x->as_MergeMem();
2001           } else if (mm2 == NULL) {
2002             mm2 = x->as_MergeMem();
2003           } else {
2004             return NULL;
2005           }
2006         } else {
2007           // two merge mems is one too many
2008           if (mm != NULL) {
2009             return NULL;
2010           }
2011           mm = x->as_MergeMem();
2012         }
2013       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2014         // two releasing stores/CAS nodes is one too many
2015         if (st != NULL || cas != NULL) {
2016           return NULL;
2017         }
2018         st = x->as_Store();
2019       } else if (is_CAS(x->Opcode())) {
2020         if (st != NULL || cas != NULL) {
2021           return NULL;
2022         }
2023         cas = x->as_LoadStore();
2024       }
2025     }
2026 
2027     // must have a store or a cas
2028     if (!st && !cas) {
2029       return NULL;
2030     }
2031 
2032     // must have a merge if we also have st
2033     if (st && (!mm || (UseShenandoahGC && mm2))) {
2034       return NULL;
2035     }
2036 
2037     Node *feed = NULL;
2038     if (cas) {
2039       // look for an SCMemProj
2040       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
2041         x = cas->fast_out(i);
2042         if (x->Opcode() == Op_SCMemProj) {
2043           feed = x;
2044           break;
2045         }
2046       }
2047       if (feed == NULL) {
2048         return NULL;
2049       }
2050     } else {
2051       feed = st;
2052     }
2053     // ensure the feed node feeds the existing mergemem;
2054     for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2055       x = feed->fast_out(i);
2056       if (x == mm) {
2057         break;
2058       }
2059     }
2060     if (x != mm) {
2061       return NULL;
2062     }
2063 
2064     MemBarNode *mbar = NULL;
2065     // ensure the merge feeds to the expected type of membar
2066     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2067       x = mm->fast_out(i);
2068       if (x->is_MemBar()) {
2069         if (x->Opcode() == Op_MemBarCPUOrder) {
2070           // with a store any cpu order membar should precede a
2071           // trailing volatile membar. with a cas it should precede a
2072           // trailing acquire membar. in either case try to skip to
2073           // that next membar
2074           MemBarNode *y =  x->as_MemBar();
2075           y = child_membar(y);
2076           if (y != NULL) {
2077             // skip to this new membar to do the check
2078             x = y;
2079           }
2080           
2081         }
2082         if (x->Opcode() == Op_MemBarVolatile) {
2083           mbar = x->as_MemBar();
2084           // for a volatile store this can be either a trailing membar
2085           // or a card mark membar. for a cas it must be a card mark
2086           // membar
2087           guarantee(cas == NULL || is_card_mark_membar(mbar),
2088                     "in CAS graph volatile membar must be a card mark");
2089         } else if (cas != NULL && x->Opcode() == Op_MemBarAcquire) {
2090           mbar = x->as_MemBar();
2091         }
2092         break;
2093       }
2094     }
2095 
2096     return mbar;
2097   }
2098 
2099   // normal_to_leading
2100   //
2101   // graph traversal helper which detects the normal case Mem feed
2102   // from either a card mark or a trailing membar to a preceding
2103   // release membar (optionally its cpuorder child) i.e. it ensures
2104   // that one of the following 3 Mem flow subgraphs is present.
2105   //
2106   //   MemBarRelease
2107   //  {MemBarCPUOrder} {leading}
2108   //          |  \      . . .
2109   //          |  StoreN/P[mo_release]  . . .
2110   //          |   /
2111   //         MergeMem
2112   //          |
2113   //  {MemBarCPUOrder}
2114   //   MemBarVolatile {trailing or card mark}
2115   //
2116   //   MemBarRelease
2117   //   MemBarCPUOrder {leading}
2118   //          |  \      . . .
2119   //          |  CompareAndSwapX  . . .
2120   //          |   /
2121   //         MergeMem
2122   //          |
2123   //   MemBarVolatile {card mark}
2124   //
2125   //   MemBarRelease
2126   //   MemBarCPUOrder {leading}
2127   //          |  \      . . .
2128   //          |  CompareAndSwapX  . . .
2129   //          |   /
2130   //         MergeMem
2131   //          |
2132   //   MemBarCPUOrder
2133   //   MemBarAcquire {trailing}
2134   //
2135   // this predicate checks for the same flow as the previous predicate
2136   // but starting from the bottom rather than the top.
2137   //
  // if the configuration is present returns the cpuorder membar for
2139   // preference or when absent the release membar otherwise NULL.
2140   //
2141   // n.b. the input membar is expected to be a MemBarVolatile but
2142   // need not be a card mark membar.
2143 
  MemBarNode *normal_to_leading(const MemBarNode *barrier)
  {
    // input must be a volatile or acquire membar
    assert((barrier->Opcode() == Op_MemBarVolatile ||
            barrier->Opcode() == Op_MemBarAcquire),
           "expecting a volatile or an acquire membar");
    bool barrier_is_acquire = barrier->Opcode() == Op_MemBarAcquire;

    // if we have an intervening cpu order membar then start the
    // search from it

    Node *x = parent_membar(barrier);

    if (x == NULL) {
      // stick with the original barrier
      x = (Node *)barrier;
    } else if (x->Opcode() != Op_MemBarCPUOrder) {
      // any other barrier means this is not the graph we want
      return NULL;
    }

    // the Mem feed to the membar should be a merge
    x = x ->in(TypeFunc::Memory);
    if (!x->is_MergeMem())
      return NULL;

    MergeMemNode *mm = x->as_MergeMem();

    // the merge should get its Bottom mem feed from the leading membar
    x = mm->in(Compile::AliasIdxBot);

    // ensure this is a non control projection
    if (!x->is_Proj() || x->is_CFG()) {
      return NULL;
    }
    // if it is fed by a membar that's the one we want
    x = x->in(0);

    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *leading = x->as_MemBar();
    // reject invalid candidates (must be a release or leading
    // cpuorder membar)
    if (!leading_membar(leading)) {
      return NULL;
    }

    // ok, we have a leading membar, now for the sanity clauses

    // the leading membar must feed Mem to a releasing store or CAS
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
    StoreNode *st = NULL;
    LoadStoreNode *cas = NULL;
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two stores or CASes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // we must have found a store or a cas (the loop above already
    // guarantees we cannot have both)
    if (st == NULL && cas == NULL) {
      // we have neither -- this is not a normal graph
      return NULL;
    }
    if (st == NULL) {
      // if we started from a volatile membar and found a CAS then the
      // original membar ought to be for a card mark
      guarantee((barrier_is_acquire || is_card_mark_membar(barrier)),
                "unexpected volatile barrier (i.e. not card mark) in CAS graph");
      // check that the CAS feeds the merge we used to get here via an
      // intermediary SCMemProj
      Node *scmemproj = NULL;
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->Opcode() == Op_SCMemProj) {
          scmemproj = x;
          break;
        }
      }
      if (scmemproj == NULL) {
        return NULL;
      }
      for (DUIterator_Fast imax, i = scmemproj->fast_outs(imax); i < imax; i++) {
        x = scmemproj->fast_out(i);
        if (x == mm) {
          return leading;
        }
      }
    } else {
      // we should not have found a store if we started from an acquire
      guarantee(!barrier_is_acquire,
                "unexpected trailing acquire barrier in volatile store graph");

      // the store should feed the merge we used to get here
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          return leading;
        }
      }
    }

    // store/CAS did not feed our merge -- not the graph we want
    return NULL;
  }
2258 
2259   // card_mark_to_trailing
2260   //
2261   // graph traversal helper which detects extra, non-normal Mem feed
2262   // from a card mark volatile membar to a trailing membar i.e. it
2263   // ensures that one of the following three GC post-write Mem flow
2264   // subgraphs is present.
2265   //
2266   // 1)
2267   //     . . .
2268   //       |
2269   //   MemBarVolatile (card mark)
2270   //      |          |
2271   //      |        StoreCM
2272   //      |          |
2273   //      |        . . .
2274   //  Bot |  /
2275   //   MergeMem
2276   //      |
2277   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2278   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2279   //                                 
2280   //
2281   // 2)
2282   //   MemBarRelease/CPUOrder (leading)
2283   //    |
2284   //    |
2285   //    |\       . . .
2286   //    | \        |
2287   //    |  \  MemBarVolatile (card mark)
2288   //    |   \   |     |
2289   //     \   \  |   StoreCM    . . .
2290   //      \   \ |
2291   //       \  Phi
2292   //        \ /
2293   //        Phi  . . .
2294   //     Bot |   /
2295   //       MergeMem
2296   //         |
2297   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2298   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2299   //
2300   // 3)
2301   //   MemBarRelease/CPUOrder (leading)
2302   //    |
2303   //    |\
2304   //    | \
2305   //    |  \      . . .
2306   //    |   \       |
2307   //    |\   \  MemBarVolatile (card mark)
2308   //    | \   \   |     |
2309   //    |  \   \  |   StoreCM    . . .
2310   //    |   \   \ |
2311   //     \   \  Phi
2312   //      \   \ /
2313   //       \  Phi
2314   //        \ /
2315   //        Phi  . . .
2316   //     Bot |   /
2317   //       MergeMem
2318   //         |
2319   //         |
2320   //   {MemBarCPUOrder}            OR  MemBarCPUOrder
2321   //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
2322   //
2323   // 4)
2324   //   MemBarRelease/CPUOrder (leading)
2325   //    |
2326   //    |\
2327   //    | \
2328   //    |  \
2329   //    |   \
2330   //    |\   \
2331   //    | \   \
2332   //    |  \   \        . . .
2333   //    |   \   \         |
2334   //    |\   \   \   MemBarVolatile (card mark)
2335   //    | \   \   \   /   |
2336   //    |  \   \   \ /  StoreCM    . . .
2337   //    |   \   \  Phi
2338   //     \   \   \ /
2339   //      \   \  Phi
2340   //       \   \ /
2341   //        \  Phi
2342   //         \ /
2343   //         Phi  . . .
2344   //      Bot |   /
2345   //       MergeMem
2346   //          |
2347   //          |
2348   //    MemBarCPUOrder
2349   //    MemBarAcquire {trailing}
2350   //
2351   // configuration 1 is only valid if UseConcMarkSweepGC &&
2352   // UseCondCardMark
2353   //
2354   // configuration 2, is only valid if UseConcMarkSweepGC &&
2355   // UseCondCardMark or if UseG1GC
2356   //
2357   // configurations 3 and 4 are only valid if UseG1GC.
2358   //
2359   // if a valid configuration is present returns the trailing membar
2360   // otherwise NULL.
2361   //
2362   // n.b. the supplied membar is expected to be a card mark
2363   // MemBarVolatile i.e. the caller must ensure the input node has the
2364   // correct operand and feeds Mem to a StoreCM node
2365 
2366   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2367   {
2368     // input must be a card mark volatile membar
2369     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2370 
2371     Node *feed = barrier->proj_out(TypeFunc::Memory);
2372     Node *x;
2373     MergeMemNode *mm = NULL;
2374 
2375     const int MAX_PHIS = max_phis(); // max phis we will search through
2376     int phicount = 0;                // current search count
2377 
2378     bool retry_feed = true;
2379     while (retry_feed) {
2380       // see if we have a direct MergeMem feed
2381       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2382         x = feed->fast_out(i);
2383         // the correct Phi will be merging a Bot memory slice
2384         if (x->is_MergeMem()) {
2385           mm = x->as_MergeMem();
2386           break;
2387         }
2388       }
2389       if (mm) {
2390         retry_feed = false;
2391       } else if (phicount++ < MAX_PHIS) {
2392         // the barrier may feed indirectly via one or two Phi nodes
2393         PhiNode *phi = NULL;
2394         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2395           x = feed->fast_out(i);
2396           // the correct Phi will be merging a Bot memory slice
2397           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2398             phi = x->as_Phi();
2399             break;
2400           }
2401         }
2402         if (!phi) {
2403           return NULL;
2404         }
2405         // look for another merge below this phi
2406         feed = phi;
2407       } else {
2408         // couldn't find a merge
2409         return NULL;
2410       }
2411     }
2412 
2413     // sanity check this feed turns up as the expected slice
2414     guarantee(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2415 
2416     MemBarNode *trailing = NULL;
2417     // be sure we have a trailing membar fed by the merge
2418     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2419       x = mm->fast_out(i);
2420       if (x->is_MemBar()) {
2421         // if this is an intervening cpu order membar skip to the
2422         // following membar
2423         if (x->Opcode() == Op_MemBarCPUOrder) {
2424           MemBarNode *y =  x->as_MemBar();
2425           y = child_membar(y);
2426           if (y != NULL) {
2427             x = y;
2428           }
2429         }
2430         if (x->Opcode() == Op_MemBarVolatile ||
2431             x->Opcode() == Op_MemBarAcquire) {
2432           trailing = x->as_MemBar();
2433         }
2434         break;
2435       }
2436     }
2437 
2438     return trailing;
2439   }
2440 
2441   // trailing_to_card_mark
2442   //
2443   // graph traversal helper which detects extra, non-normal Mem feed
2444   // from a trailing volatile membar to a preceding card mark volatile
2445   // membar i.e. it identifies whether one of the three possible extra
2446   // GC post-write Mem flow subgraphs is present
2447   //
2448   // this predicate checks for the same flow as the previous predicate
2449   // but starting from the bottom rather than the top.
2450   //
2451   // if the configuration is present returns the card mark membar
2452   // otherwise NULL
2453   //
2454   // n.b. the supplied membar is expected to be a trailing
2455   // MemBarVolatile or MemBarAcquire i.e. the caller must ensure the
2456   // input node has the correct opcode
2457 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile ||
           trailing->Opcode() == Op_MemBarAcquire,
           "expecting a volatile or acquire membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    Node *x = (Node *)trailing;

    // look for a preceding cpu order membar
    MemBarNode *y = parent_membar(x->as_MemBar());
    if (y != NULL) {
      // make sure it is a cpu order membar
      if (y->Opcode() != Op_MemBarCPUOrder) {
        // this is not the graph we were looking for
        return NULL;
      }
      // start the search from here
      x = y;
    }

    // the Mem feed to the membar should be a merge
    x = x->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = max_phis(); // max phis we will search through
    int phicount = 0;                    // current search count

    // if the Bot slice is already a Proj we can go straight to the
    // membar check below; otherwise walk up through the Phis
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // scan this Phi's inputs for the card mark membar's Memory
        // Proj, a further Bottom Phi to recurse through, or a feed
        // from a leading membar
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this is the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        // ran out of Phis (or hit a non-Phi) without finding the proj
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2549 
2550   // trailing_to_leading
2551   //
2552   // graph traversal helper which checks the Mem flow up the graph
2553   // from a (non-card mark) trailing membar attempting to locate and
2554   // return an associated leading membar. it first looks for a
2555   // subgraph in the normal configuration (relying on helper
2556   // normal_to_leading). failing that it then looks for one of the
2557   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2559   // trailing_to_card_mark), and then checks that the card mark membar
2560   // is fed by a leading membar (once again relying on auxiliary
2561   // predicate normal_to_leading).
2562   //
2563   // if the configuration is valid returns the cpuorder member for
2564   // preference or when absent the release membar otherwise NULL.
2565   //
2566   // n.b. the input membar is expected to be either a volatile or
2567   // acquire membar but in the former case must *not* be a card mark
2568   // membar.
2569 
2570   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2571   {
2572     assert((trailing->Opcode() == Op_MemBarAcquire ||
2573             trailing->Opcode() == Op_MemBarVolatile),
2574            "expecting an acquire or volatile membar");
2575     assert((trailing->Opcode() != Op_MemBarVolatile ||
2576             !is_card_mark_membar(trailing)),
2577            "not expecting a card mark membar");
2578 
2579     MemBarNode *leading = normal_to_leading(trailing);
2580 
2581     if (leading) {
2582       return leading;
2583     }
2584 
2585     // there is no normal path from trailing to leading membar. see if
2586     // we can arrive via a card mark membar
2587 
2588     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2589 
2590     if (!card_mark_membar) {
2591       return NULL;
2592     }
2593 
2594     return normal_to_leading(card_mark_membar);
2595   }
2596 
2597   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2598 
// returns true when the supplied acquire membar can be elided, i.e.
// when the associated load will be generated as an ldar (or the
// membar is the trailing membar of a CAS)
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      // skip over the narrow-pointer decode to reach the load
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // other option for unnecessary membar is that it is a trailing node
  // belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2643 
// returns true when the supplied load should be generated as an
// ldar, i.e. it is an acquiring load that feeds an acquire membar
bool needs_acquiring_load(const Node *n)
{
  assert(n->is_Load(), "expecting a load");
  if (UseBarriersForVolatile) {
    // we use a normal load and a dmb
    return false;
  }

  LoadNode *ld = n->as_Load();

  if (!ld->is_acquire()) {
    return false;
  }

  // check if this load is feeding an acquire membar
  //
  //   LoadX[mo_acquire]
  //   {  |1   }
  //   {DecodeN}
  //      |Parms
  //   MemBarAcquire*
  //
  // where * tags node we were passed
  // and |k means input k

  Node *start = ld;
  Node *mbacq = NULL;

  // if we hit a DecodeNarrowPtr we reset the start node and restart
  // the search through the outputs
 restart:

  // scan the users of the current start node for a MemBarAcquire;
  // only follow a DecodeN while no membar has been found
  for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
    Node *x = start->fast_out(i);
    if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
      mbacq = x;
    } else if (!mbacq &&
               (x->is_DecodeNarrowPtr() ||
                (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
      start = x;
      goto restart;
    }
  }

  if (mbacq) {
    return true;
  }

  return false;
}
2694 
// returns true when the supplied release membar can be elided, i.e.
// when it is the leading membar of a valid volatile put subgraph
// (the put will be generated as an stlr)
bool unnecessary_release(const Node *n)
{
  assert((n->is_MemBar() &&
          n->Opcode() == Op_MemBarRelease),
         "expecting a release membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // if there is a dependent CPUOrder barrier then use that as the
  // leading

  MemBarNode *barrier = n->as_MemBar();
  // check for an intervening cpuorder membar
  MemBarNode *b = child_membar(barrier);
  if (b && b->Opcode() == Op_MemBarCPUOrder) {
    // ok, so start the check from the dependent cpuorder barrier
    barrier = b;
  }

  // must start with a normal feed
  MemBarNode *child_barrier = leading_to_normal(barrier);

  if (!child_barrier) {
    return false;
  }

  if (!is_card_mark_membar(child_barrier)) {
    // this is the trailing membar and we are done
    return true;
  }

  // must be sure this card mark feeds a trailing membar
  MemBarNode *trailing = card_mark_to_trailing(child_barrier);
  return (trailing != NULL);
}
2733 
2734 bool unnecessary_volatile(const Node *n)
2735 {
2736   // assert n->is_MemBar();
2737   if (UseBarriersForVolatile) {
2738     // we need to plant a dmb
2739     return false;
2740   }
2741 
2742   MemBarNode *mbvol = n->as_MemBar();
2743 
2744   // first we check if this is part of a card mark. if so then we have
2745   // to generate a StoreLoad barrier
2746 
2747   if (is_card_mark_membar(mbvol)) {
2748       return false;
2749   }
2750 
2751   // ok, if it's not a card mark then we still need to check if it is
2752   // a trailing membar of a volatile put graph.
2753 
2754   return (trailing_to_leading(mbvol) != NULL);
2755 }
2756 
2757 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2758 
// returns true when the supplied store should be generated as an
// stlr, i.e. it is a releasing store fed by the leading membar of a
// valid volatile put subgraph
bool needs_releasing_store(const Node *n)
{
  // n is expected to be a Store
  if (UseBarriersForVolatile) {
    // we use a normal store and dmb combination
    return false;
  }

  StoreNode *st = n->as_Store();

  // the store must be marked as releasing
  if (!st->is_release()) {
    return false;
  }

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  if (! x || !x->is_Proj()) {
    return false;
  }

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  if (!x || !x->is_MemBar()) {
    return false;
  }

  MemBarNode *barrier = x->as_MemBar();

  // if the barrier is a release membar or a cpuorder membar fed by a
  // release membar then we need to check whether that forms part of a
  // volatile put graph.

  // reject invalid candidates
  if (!leading_membar(barrier)) {
    return false;
  }

  // does this lead a normal subgraph?
  MemBarNode *mbvol = leading_to_normal(barrier);

  if (!mbvol) {
    return false;
  }

  // all done unless this is a card mark
  if (!is_card_mark_membar(mbvol)) {
    return true;
  }

  // we found a card mark -- just make sure we have a trailing barrier

  return (card_mark_to_trailing(mbvol) != NULL);
}
2817 
2818 // predicate controlling translation of CAS
2819 //
2820 // returns true if CAS needs to use an acquiring load otherwise false
2821 
// returns true when a CAS should use an acquiring (ldaxr) load.
// whenever barriers are not in use the answer is always true; the
// ASSERT code merely validates that the CAS sits in the expected
// leading/trailing membar subgraph.
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  guarantee(barrier->Opcode() == Op_MemBarCPUOrder,
            "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  guarantee(mbar != NULL, "CAS not embedded in normal graph!");

  // if this is a card mark membar check we have a trailing acquire

  if (is_card_mark_membar(mbar)) {
    mbar = card_mark_to_trailing(mbar);
  }

  guarantee(mbar != NULL, "card mark membar for CAS not embedded in normal graph!");

  guarantee(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2874 
2875 // predicate controlling translation of StoreCM
2876 //
2877 // returns true if a StoreStore must precede the card write otherwise
2878 // false
2879 
2880 bool unnecessary_storestore(const Node *storecm)
2881 {
2882   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2883 
2884   // we only ever need to generate a dmb ishst between an object put
2885   // and the associated card mark when we are using CMS without
2886   // conditional card marking
2887 
2888   if (!UseConcMarkSweepGC || UseCondCardMark) {
2889     return true;
2890   }
2891 
2892   // if we are implementing volatile puts using barriers then the
2893   // object put is an str so we must insert the dmb ishst
2894 
2895   if (UseBarriersForVolatile) {
2896     return false;
2897   }
2898 
2899   // we can omit the dmb ishst if this StoreCM is part of a volatile
2900   // put because in thta case the put will be implemented by stlr
2901   //
2902   // we need to check for a normal subgraph feeding this StoreCM.
2903   // that means the StoreCM must be fed Memory from a leading membar,
2904   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2905   // leading membar must be part of a normal subgraph
2906 
2907   Node *x = storecm->in(StoreNode::Memory);
2908 
2909   if (!x->is_Proj()) {
2910     return false;
2911   }
2912 
2913   x = x->in(0);
2914 
2915   if (!x->is_MemBar()) {
2916     return false;
2917   }
2918 
2919   MemBarNode *leading = x->as_MemBar();
2920 
2921   // reject invalid candidates
2922   if (!leading_membar(leading)) {
2923     return false;
2924   }
2925 
2926   // we can omit the StoreStore if it is the head of a normal subgraph
2927   return (leading_to_normal(leading) != NULL);
2928 }
2929 
2930 
2931 #define __ _masm.
2932 
2933 // advance declarations for helper functions to convert register
2934 // indices to register objects
2935 
2936 // the ad file has to provide implementations of certain methods
2937 // expected by the generic code
2938 //
2939 // REQUIRED FUNCTIONALITY
2940 
2941 //=============================================================================
2942 
2943 // !!!!! Special hack to get all types of calls to specify the byte offset
2944 //       from the start of the call to the point where the return address
2945 //       will point.
2946 
2947 int MachCallStaticJavaNode::ret_addr_offset()
2948 {
2949   // call should be a simple bl
2950   int off = 4;
2951   return off;
2952 }
2953 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // four instructions of 4 bytes each precede the return address
  return 16; // movz, movk, movk, bl
}
2958 
2959 int MachCallRuntimeNode::ret_addr_offset() {
2960   // for generated stubs the call will be
2961   //   far_call(addr)
2962   // for real runtime callouts it will be six instructions
2963   // see aarch64_enc_java_to_runtime
2964   //   adr(rscratch2, retaddr)
2965   //   lea(rscratch1, RuntimeAddress(addr)
2966   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2967   //   blrt rscratch1
2968   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2969   if (cb) {
2970     return MacroAssembler::far_branch_size();
2971   } else {
2972     return 6 * NativeInstruction::instruction_size;
2973   }
2974 }
2975 
2976 // Indicate if the safepoint node needs the polling page as an input
2977 
2978 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2980 // instruction itself. so we cannot plant a mov of the safepoint poll
2981 // address followed by a load. setting this to true means the mov is
2982 // scheduled as a prior instruction. that's better for scheduling
2983 // anyway.
2984 
bool SafePointNode::needs_polling_address_input()
{
  // true: the poll page address is materialized by a separate mov
  // scheduled before the poll load (see comment above)
  return true;
}
2989 
2990 //=============================================================================
2991 
2992 #ifndef PRODUCT
// debug printout for the breakpoint pseudo-instruction
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
2996 #endif
2997 
// emit a breakpoint as a brk instruction with immediate 0
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
3002 
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // defer to the generic size computation
  return MachNode::size(ra_);
}
3006 
3007 //=============================================================================
3008 
3009 #ifndef PRODUCT
  // debug printout showing how many pad bytes this nop sequence emits
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
3013 #endif
3014 
3015   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
3016     MacroAssembler _masm(&cbuf);
3017     for (int i = 0; i < _count; i++) {
3018       __ nop();
3019     }
3020   }
3021 
  // size is exactly one instruction per emitted nop
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
3025 
3026 //=============================================================================
// the constant table base occupies no register on aarch64
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
3028 
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
3032 
// post-allocation expansion of the constant base is not used on aarch64
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never reached: requires_postalloc_expand() returns false
  ShouldNotReachHere();
}
3037 
// the constant base emits no code (absolute addressing is used)
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
3041 
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // nothing is emitted, so the size is zero
  return 0;
}
3045 
3046 #ifndef PRODUCT
// debug printout noting the (empty) constant base encoding
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
3050 #endif
3051 
3052 #ifndef PRODUCT
// debug printout sketching the frame setup that emit() generates via
// build_frame
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames fit the stp immediate offset; larger ones go
  // through rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
3072 #endif
3073 
// emit the method prolog: patchable nop, optional stack bang, frame
// build, simulator notification and constant table base setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // tell the simulator we have entered a method
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3109 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
3115 
int MachPrologNode::reloc() const
{
  // the prolog contains no relocatable values
  return 0;
}
3120 
3121 //=============================================================================
3122 
3123 #ifndef PRODUCT
// debug printout sketching the frame teardown and return poll that
// emit() generates
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // mirror the three remove_frame cases: empty frame, small frame
  // (immediate offset) and large frame (via rscratch1)
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
3147 #endif
3148 
// emit the method epilog: frame teardown, simulator notification,
// reserved stack check and return safepoint poll
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // tell the simulator we are about to leave the method
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // touch the polling page on return from a method compilation
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3168 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
3173 
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
3178 
// use the default pipeline class for scheduling purposes
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
3182 
3183 // This method seems to be obsolete. It is declared in machnode.hpp
3184 // and defined in all *.ad files, but it is never called. Should we
3185 // get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  // offset of the poll instruction within the epilog
  return 4;
}
3190 
3191 //=============================================================================
3192 
3193 // Figure out which register class each belongs in: rc_int, rc_float or
3194 // rc_stack.
3195 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3196 
3197 static enum RC rc_class(OptoReg::Name reg) {
3198 
3199   if (reg == OptoReg::Bad) {
3200     return rc_bad;
3201   }
3202 
3203   // we have 30 int registers * 2 halves
3204   // (rscratch1 and rscratch2 are omitted)
3205 
3206   if (reg < 60) {
3207     return rc_int;
3208   }
3209 
3210   // we have 32 float register * 2 halves
3211   if (reg < 60 + 128) {
3212     return rc_float;
3213   }
3214 
3215   // Between float regs & stack is the flags regs.
3216   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3217 
3218   return rc_stack;
3219 }
3220 
3221 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3222   Compile* C = ra_->C;
3223 
3224   // Get registers to move.
3225   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3226   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3227   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3228   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3229 
3230   enum RC src_hi_rc = rc_class(src_hi);
3231   enum RC src_lo_rc = rc_class(src_lo);
3232   enum RC dst_hi_rc = rc_class(dst_hi);
3233   enum RC dst_lo_rc = rc_class(dst_lo);
3234 
3235   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3236 
3237   if (src_hi != OptoReg::Bad) {
3238     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3239            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3240            "expected aligned-adjacent pairs");
3241   }
3242 
3243   if (src_lo == dst_lo && src_hi == dst_hi) {
3244     return 0;            // Self copy, no move.
3245   }
3246 
3247   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3248               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3249   int src_offset = ra_->reg2offset(src_lo);
3250   int dst_offset = ra_->reg2offset(dst_lo);
3251 
3252   if (bottom_type()->isa_vect() != NULL) {
3253     uint ireg = ideal_reg();
3254     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3255     if (cbuf) {
3256       MacroAssembler _masm(cbuf);
3257       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3258       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3259         // stack->stack
3260         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3261         if (ireg == Op_VecD) {
3262           __ unspill(rscratch1, true, src_offset);
3263           __ spill(rscratch1, true, dst_offset);
3264         } else {
3265           __ spill_copy128(src_offset, dst_offset);
3266         }
3267       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3268         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3269                ireg == Op_VecD ? __ T8B : __ T16B,
3270                as_FloatRegister(Matcher::_regEncode[src_lo]));
3271       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3272         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3273                        ireg == Op_VecD ? __ D : __ Q,
3274                        ra_->reg2offset(dst_lo));
3275       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3276         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3277                        ireg == Op_VecD ? __ D : __ Q,
3278                        ra_->reg2offset(src_lo));
3279       } else {
3280         ShouldNotReachHere();
3281       }
3282     }
3283   } else if (cbuf) {
3284     MacroAssembler _masm(cbuf);
3285     switch (src_lo_rc) {
3286     case rc_int:
3287       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3288         if (is64) {
3289             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3290                    as_Register(Matcher::_regEncode[src_lo]));
3291         } else {
3292             MacroAssembler _masm(cbuf);
3293             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3294                     as_Register(Matcher::_regEncode[src_lo]));
3295         }
3296       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3297         if (is64) {
3298             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3299                      as_Register(Matcher::_regEncode[src_lo]));
3300         } else {
3301             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3302                      as_Register(Matcher::_regEncode[src_lo]));
3303         }
3304       } else {                    // gpr --> stack spill
3305         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3306         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3307       }
3308       break;
3309     case rc_float:
3310       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3311         if (is64) {
3312             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3313                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3314         } else {
3315             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3316                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3317         }
3318       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3319           if (cbuf) {
3320             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3321                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3322         } else {
3323             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3324                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3325         }
3326       } else {                    // fpr --> stack spill
3327         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3328         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3329                  is64 ? __ D : __ S, dst_offset);
3330       }
3331       break;
3332     case rc_stack:
3333       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3334         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3335       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3336         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3337                    is64 ? __ D : __ S, src_offset);
3338       } else {                    // stack --> stack copy
3339         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3340         __ unspill(rscratch1, is64, src_offset);
3341         __ spill(rscratch1, is64, dst_offset);
3342       }
3343       break;
3344     default:
3345       assert(false, "bad rc_class for spill");
3346       ShouldNotReachHere();
3347     }
3348   }
3349 
3350   if (st) {
3351     st->print("spill ");
3352     if (src_lo_rc == rc_stack) {
3353       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3354     } else {
3355       st->print("%s -> ", Matcher::regName[src_lo]);
3356     }
3357     if (dst_lo_rc == rc_stack) {
3358       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3359     } else {
3360       st->print("%s", Matcher::regName[dst_lo]);
3361     }
3362     if (bottom_type()->isa_vect() != NULL) {
3363       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3364     } else {
3365       st->print("\t# spill size = %d", is64 ? 64:32);
3366     }
3367   }
3368 
3369   return 0;
3370 
3371 }
3372 
3373 #ifndef PRODUCT
// debug printout: before allocation show a symbolic copy, after
// allocation reuse implementation() in format-only mode
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
3380 #endif
3381 
// emit the spill copy via implementation() in emit-only mode
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
3385 
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // defer to the generic size computation
  return MachNode::size(ra_);
}
3389 
3390 //=============================================================================
3391 
3392 #ifndef PRODUCT
// debug printout of the box-lock address computation emitted below
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("add %s, rsp, #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
3399 #endif
3400 
// materialize the address of the box lock stack slot into the
// allocated register via a single add
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // only offsets encodable as an add immediate are handled; anything
  // larger is unexpected (size() below assumes one instruction)
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}
3413 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit() above always produces exactly one 4 byte add instruction
  return 4;
}
3418 
3419 //=============================================================================
3420 
3421 #ifndef PRODUCT
// debug printout of the unverified entry point inline cache check
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
    // n.b. this branch loads a full (uncompressed) klass pointer
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
3436 #endif
3437 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Compare the receiver's klass (via j_rarg0) against the inline-cache
  // klass.  NOTE(review): register roles (rscratch2 expected klass,
  // rscratch1 temp) inferred -- confirm against MacroAssembler::cmp_klass.
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // Klass mismatch: jump to the inline-cache miss stub.
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

// Size of the unverified entry point; computed generically.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3456 
3457 // REQUIRED EMIT CODE
3458 
3459 //=============================================================================
3460 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 on
// failure (code cache full).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    // Stub space could not be allocated; record the failure for the caller.
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3480 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 on
// failure (code cache full).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // adr(lr, pc) loads this handler's own address into lr before
  // jumping to the deopt blob's unpack entry.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3501 
3502 // REQUIRED MATCHER CODE
3503 
3504 //=============================================================================
3505 
3506 const bool Matcher::match_rule_supported(int opcode) {
3507 
3508   switch (opcode) {
3509   default:
3510     break;
3511   }
3512 
3513   if (!has_match_rule(opcode)) {
3514     return false;
3515   }
3516 
3517   return true;  // Per default match rules are supported.
3518 }
3519 
3520 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3521 
3522   // TODO
3523   // identify extra cases that we might want to provide match rules for
3524   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3525   bool ret_value = match_rule_supported(opcode);
3526   // Add rules here.
3527 
3528   return ret_value;  // Per default match rules are supported.
3529 }
3530 
// No predicated (masked) vector support in this port.
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// Use the default register-pressure threshold for float registers.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Not used on AArch64; deliberately unimplemented.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3544 
3545 // Is this branch offset short enough that a short branch can be used?
3546 //
3547 // NOTE: If the platform does not provide any short branch variants, then
3548 //       this method should return false for offset 0.
3549 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3550   // The passed offset is relative to address of the branch.
3551 
3552   return (-32768 <= offset && offset < 32768);
3553 }
3554 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3565 
3566 // Vector width in bytes.
3567 const int Matcher::vector_width_in_bytes(BasicType bt) {
3568   int size = MIN2(16,(int)MaxVectorSize);
3569   // Minimum 2 values in vector
3570   if (size < 2*type2aelembytes(bt)) size = 0;
3571   // But never < 4
3572   if (size < 4) size = 0;
3573   return size;
3574 }
3575 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  // Max lane count = full vector width divided by the element size.
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3580 const int Matcher::min_vector_size(const BasicType bt) {
3581 //  For the moment limit the vector size to 8 bytes
3582     int size = 8 / type2aelembytes(bt);
3583     if (size < 2) size = 2;
3584     return size;
3585 }
3586 
3587 // Vector ideal reg.
3588 const uint Matcher::vector_ideal_reg(int len) {
3589   switch(len) {
3590     case  8: return Op_VecD;
3591     case 16: return Op_VecX;
3592   }
3593   ShouldNotReachHere();
3594   return 0;
3595 }
3596 
// Vector shift counts always live in a full 128-bit register,
// regardless of the requested size.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
3600 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Misaligned vector store/load is allowed unless the AlignVector flag
// demands aligned accesses.  (Comment previously said "x86" -- stale
// copy-paste; this is the AArch64 port.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3610 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// false: no explicit masking of the shift count is emitted.
const bool Matcher::need_masked_shift_count = false;
3631 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Subsume the decode into the address only when there is no shift.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  return Universe::narrow_oop_base() == NULL;
}

bool Matcher::const_klass_prefer_decode() {
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  return Universe::narrow_klass_base() == NULL;
}
3661 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Should never be called on AArch64 (guarded by Unimplemented()).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3693 
3694 // Return whether or not this register is ever used as an argument.
3695 // This function is used on startup to build the trampoline stubs in
3696 // generateOptoStub.  Registers not mentioned will be killed by the VM
3697 // call in the trampoline, and arguments in those registers not be
3698 // available to the callee.
3699 bool Matcher::can_be_java_arg(int reg)
3700 {
3701   return
3702     reg ==  R0_num || reg == R0_H_num ||
3703     reg ==  R1_num || reg == R1_H_num ||
3704     reg ==  R2_num || reg == R2_H_num ||
3705     reg ==  R3_num || reg == R3_H_num ||
3706     reg ==  R4_num || reg == R4_H_num ||
3707     reg ==  R5_num || reg == R5_H_num ||
3708     reg ==  R6_num || reg == R6_H_num ||
3709     reg ==  R7_num || reg == R7_H_num ||
3710     reg ==  V0_num || reg == V0_H_num ||
3711     reg ==  V1_num || reg == V1_H_num ||
3712     reg ==  V2_num || reg == V2_H_num ||
3713     reg ==  V3_num || reg == V3_H_num ||
3714     reg ==  V4_num || reg == V4_H_num ||
3715     reg ==  V5_num || reg == V5_H_num ||
3716     reg ==  V6_num || reg == V6_H_num ||
3717     reg ==  V7_num || reg == V7_H_num;
3718 }
3719 
// A register is a spillable argument register iff it can carry a Java
// argument (see can_be_java_arg above).
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Do not use a special code sequence for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3728 
// Register for DIVI projection of divmodI.
RegMask Matcher::divI_proj_mask() {
  // Fused div/mod projections are never requested on AArch64.
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Mask for the register holding the saved SP across a MethodHandle
// invoke: the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3755 
3756 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
3757   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3758     Node* u = addp->fast_out(i);
3759     if (u->is_Mem()) {
3760       int opsize = u->as_Mem()->memory_size();
3761       assert(opsize > 0, "unexpected memory operand size");
3762       if (u->as_Mem()->memory_size() != (1<<shift)) {
3763         return false;
3764       }
3765     }
3766   }
3767   return true;
3768 }
3769 
// false => the matcher does not require a typed ConvI2L ahead of int
// values used as long addresses (see matcher.cpp for how this is used).
const bool Matcher::convi2l_type_required = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Simple base+offset addresses are handled by the shared helper.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  Node *off = m->in(AddPNode::Offset);
  // Case 1: offset is (LShiftL x con).  Fold the shift into the address
  // expression, provided every memory use accesses (1 << con) bytes;
  // also fold an inner ConvI2L when present.
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // Case 2: offset is a bare (ConvI2L x): fold just the conversion.
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
3812 
// Platform hook for reshaping AddP address subtrees before matching;
// no platform-specific reshaping is performed on AArch64.
void Compile::reshape_address(AddPNode* addp) {
}
3815 
// helper for encoding java_to_runtime calls on sim
//
// Computes the extra arguments required when planting a call to the
// simulator blrt instruction: the general-purpose argument count
// (gpcnt), the floating-point argument count (fpcnt), and an encoding
// of the return type (rtype), all derived from the call's TypeFunc.

static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Count the call's parameters by basic type.
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so FP args are counted in gps as
      // well as fps.  This looks intentional for the simulator calling
      // convention but is worth confirming.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Map the Java return type onto the simulator's return-type encoding.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    // All non-void, non-FP returns are treated as integral.
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3857 
// Emit a volatile access (INSN, e.g. an ldar/stlr variant) for a memory
// operand.  Only plain [base] addressing is accepted: the guarantees
// reject any index, displacement or scale.  SCRATCH is currently unused.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types used by the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3871 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
    case INDINDEXI2L:
    case INDINDEXI2LN:
      // Operand carries an int index converted to long: the index
      // register must be sign-extended (sxtw).
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: base + displacement addressing.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      // Indexed addressing cannot also carry a displacement.
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
3902 
  // Float-register variant of loadStore() above.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // NOTE(review): unlike the integer variant above, INDINDEXI2L and
    // INDINDEXI2LN are not treated as sign-extended here -- confirm
    // that FP memory operands never use those unscaled-I2L modes.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm.*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
3925 
  // Vector (SIMD) variant of loadStore().  Note: opcode is accepted for
  // symmetry with the variants above but is not consulted; the index
  // (when present) is always lsl-scaled.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3937 
3938 %}
3939 
3940 
3941 
3942 //----------ENCODING BLOCK-----------------------------------------------------
3943 // This block specifies the encoding classes used by the compiler to
3944 // output byte streams.  Encoding classes are parameterized macros
3945 // used by Machine Instruction Nodes in order to generate the bit
3946 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
3950 // which returns its register number when queried.  CONST_INTER causes
3951 // an operand to generate a function which returns the value of the
3952 // constant when queried.  MEMORY_INTER causes an operand to generate
3953 // four functions which return the Base Register, the Index Register,
3954 // the Scale Value, and the Offset Value of the operand when queried.
3955 // COND_INTER causes an operand to generate six functions which return
3956 // the encoding code (ie - encoding bits for the instruction)
3957 // associated with each basic boolean condition for a conditional
3958 // instruction.
3959 //
3960 // Instructions specify two basic values for encoding.  Again, a
3961 // function is available to check if the constant displacement is an
3962 // oop. They use the ins_encode keyword to specify their encoding
3963 // classes (which must be a sequence of enc_class names, and their
3964 // parameters, specified in the encoding block), and they use the
3965 // opcode keyword to specify, in order, their primary, secondary, and
3966 // tertiary opcode.  Only the opcode sections which a particular
3967 // instruction needs for encoding need to be specified.
3968 encode %{
3969   // Build emit functions for each basic byte or larger field in the
3970   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3971   // from C++ code in the enc_class source block.  Emit functions will
3972   // live in the main source block for now.  In future, we can
3973   // generalize this by adding a syntax that specifies the sizes of
3974   // fields in an order, so that the adlc can build the emit functions
3975   // automagically
3976 
  // catch all for unimplemented encodings: stops the VM with a
  // "C2 catch all" message if this encoding is ever emitted and run.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3982 
  // BEGIN Non-volatile memory access
  //
  // Each encoding below forwards to the loadStore() helper, passing
  // $mem->opcode() so the helper can choose between sign-extended and
  // lsl-scaled index addressing.  The iRegI vs iRegL operand selects
  // the 32- vs 64-bit destination form of the same mnemonic.

  // sign-extending byte load into int
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // sign-extending byte load
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // zero-extending byte load into int
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // zero-extending byte load into long
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // sign-extending halfword load into int
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // sign-extending halfword load
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // zero-extending halfword load into int
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // zero-extending halfword load into long
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit word load into int
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 32-bit word load into long (zero-extending)
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // sign-extending word load into long
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // 64-bit load
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4056 
  // single-precision FP load
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // double-precision FP load
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD loads: S = 32-bit, D = 64-bit, Q = 128-bit register variant.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4086 
  // Integer stores.  The *0 variants store the zero register (zr)
  // instead of taking a source operand.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // zero byte store preceded by a StoreStore barrier
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // copy sp into a scratch register and store that instead
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4149 
  // single-precision FP store
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // double-precision FP store
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD stores: S = 32-bit, D = 64-bit, Q = 128-bit register variant.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4179 
  // END Non-volatile memory access

  // volatile loads and stores
  //
  // These all expand through MOV_VOLATILE, so only plain [base]
  // addressing is accepted (the macro's guarantees enforce this).
  // Sub-word acquiring loads have no sign-extending form, so the
  // signed variants follow the ldar with an explicit sxt*.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // acquiring byte load, then sign-extend to int
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // acquiring byte load, then sign-extend to long
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // acquiring halfword load, then sign-extend to int
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // acquiring halfword load, then sign-extend to long
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
4262 
  // Volatile float load: ldarw into rscratch1 (which also serves as the
  // scratch register for address formation), then move the raw bits into
  // the FP register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Volatile double load: 64-bit ldar into rscratch1, then fmov into the
  // FP register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4274 
  // Store-release of a 64-bit register.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // copy sp into a register that stlr can encode
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile float store: move the FP bits into rscratch2, then stlrw.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Volatile double store: move the FP bits into rscratch2, then stlr.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4308 
4309   // synchronized read/update encodings
4310 
  // Load-exclusive with acquire (ldaxr).  ldaxr only accepts a plain base
  // register, so any index/displacement is first folded into rscratch1
  // with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // both displacement and index: form the address in two lea steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4339 
  // Store-exclusive with release (stlxr).  Like ldaxr above, the address
  // must be a plain base register, so index/displacement forms go through
  // lea into rscratch2.  rscratch1 receives the status word (0 on
  // success); the trailing cmpw leaves EQ set on success for the
  // consuming branch.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // both displacement and index: form the address in two lea steps
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4369 
  // 64-bit compare-and-swap with release (but not acquire) semantics.
  // The guarantee documents that the matcher only feeds these a plain
  // base-register address.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit variant.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 16-bit variant.
  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 8-bit variant.
  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4401 
4402 
  // Shenandoah GC oop compare-and-swap, release semantics only.  oldval
  // is copied into tmp first so the caller's oldval register survives
  // cmpxchg_oop.
  enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    __ cmpxchg_oop($mem$$Register, tmp, $newval$$Register,
                   /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
  %}
4411 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  // 32-bit acquiring variant.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
4431 
4432 
  // Shenandoah GC oop compare-and-swap with acquire as well as release
  // semantics; otherwise identical to the non-acquiring variant above.
  enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    __ cmpxchg_oop($mem$$Register, tmp, $newval$$Register,
                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
  %}
4441 
  // auxiliary used for CompareAndSwapX to set result register
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    // res = 1 if the flags say EQ (CAS succeeded), else 0
    __ cset(res_reg, Assembler::EQ);
  %}
4448 
4449   // prefetch encodings
4450 
  // Prefetch for store into L1 (PSTL1KEEP).  prfm handles base+disp and
  // base+index forms directly; the combined form needs a lea first.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4469 
  // mov encodings
4471 
  // Move a 32-bit immediate into a register; zero is moved from zr.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // Move a 64-bit immediate into a register; zero is moved from zr.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
4493 
  // Move a pointer constant: relocation-aware forms for oops and
  // metadata; constants below the first page as plain immediates; other
  // addresses via adrp + add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and the value 1 are handled by dedicated rules (mov_p0/mov_p1)
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4518 
  // Pointer constant 0 (NULL).
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1.
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (u_int64_t)1);
  %}
4530 
  // Materialize the polling page address with adrp; the page is aligned,
  // so the low-bits offset returned by adrp must be zero.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}

  // Materialize the card table byte map base.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4544 
  // Move a narrow (compressed) oop constant.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // narrow-oop NULL is handled by the dedicated mov_n0 rule
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow oop constant 0.
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Move a narrow (compressed) klass constant.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4576 
4577   // arithmetic encodings
4578 
  // 32-bit add/subtract of an add/sub immediate.  The instruct's primary
  // opcode selects the operation; the constant is negated for subtract
  // and then emitted with whichever of addw/subw keeps the encoded
  // immediate non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4606 
  // 32-bit signed divide; corrected_idivl handles the min_jint / -1 case.
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit signed divide (corrected_idivq).
  // NOTE(review): operands are declared iRegI although this emits the
  // 64-bit divide -- confirm against the instructs that use it.
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit signed remainder (final flag true => compute modulus).
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit signed remainder.
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
4638 
4639   // compare instruction encodings
4640 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub immediate: choose subsw or addsw so
  // the emitted immediate is always non-negative.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against a general immediate: materialize it first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}

  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against a 12-bit immediate via subs/adds.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: -val == val, so
      // materialize the constant and compare against the register
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against a general immediate: materialize it first.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}

  // Pointer compare (64-bit).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer test against NULL.
  enc_class aarch64_enc_testp(iRegP src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop test against NULL.
  enc_class aarch64_enc_testn(iRegN src) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
4722 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Same, for unsigned condition operands (cmpOpU).
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    MacroAssembler _masm(&cbuf);
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
4740 
  // Slow-path partial subtype check.  On a hit execution falls through
  // (result zeroed when $primary is set); the miss label is bound at the
  // end, so on a miss result keeps the value left by
  // check_klass_subtype_slow_path (set_cond_codes requested).
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4758 
  // Java static call (or a call to a runtime wrapper when no _method is
  // attached).  Bails out via record_failure if the code cache cannot
  // hold the call or its to-interpreter stub.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4785 
  // Java dynamic (virtual) call through an inline cache.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4795 
  // Post-call epilogue; the VerifyStackAtCalls check is not implemented
  // on AArch64 and traps via call_Unimplemented.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4803 
  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // target lives in the code cache: a trampoline call can reach it
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // pop the breadcrumb pair pushed above
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4834 
  // Jump to the rethrow stub (far_jump, since the stub may be out of
  // range of a plain branch).
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}

  // Tail call: indirect branch to the jump target.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used when forwarding an exception.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4860 
  // Fast-path monitor enter.  On exit the condition flags encode the
  // outcome (see the comment at the end: EQ == success, NE == must call
  // the runtime).  Uses tmp/tmp2 and rscratch1 as scratch.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never zero here, so this forces NE (runtime path)
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      Label retry_load;
      if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
5015 
5016   // TODO
5017   // reimplement this with custom cmpxchgptr code
5018   // which avoids some of the unnecessary branching
  // Emit the inline fast-path code for monitorexit.  On exit the condition
  // flags encode the outcome: EQ means the unlock succeeded inline, NE means
  // the caller must branch to the runtime slow path.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);      // object being unlocked
    Register box = as_Register($box$$reg);         // on-stack BasicLock
    Register disp_hdr = as_Register($tmp$$reg);    // displaced header from the box
    Register tmp = as_Register($tmp2$$reg);        // scratch / monitor pointer
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      // Load the mark word; tmp is reused as the monitor pointer below.
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      // Monitor bit set in the displaced header => lock is inflated.
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // Atomically swap the displaced header back into the mark word,
        // expecting the mark word to still hold the box address.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box); // EQ iff the CAS saw the expected value (box)
      } else {
        Label retry_load;
        if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
          __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);   // store succeeded (status 0) => done, flags are EQ
        __ b(retry_load);     // lost the exclusive monitor => retry
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont); // not owner or recursions != 0 => slow path (NE)

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr); // sets EQ when there are no waiters
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
5114 
5115 %}
5116 
5117 //----------FRAME--------------------------------------------------------------
5118 // Definition of frame structure and management information.
5119 //
5120 //  S T A C K   L A Y O U T    Allocators stack-slot number
5121 //                             |   (to get allocators register number
5122 //  G  Owned by    |        |  v    add OptoReg::stack0())
5123 //  r   CALLER     |        |
5124 //  o     |        +--------+      pad to even-align allocators stack-slot
5125 //  w     V        |  pad0  |        numbers; owned by CALLER
5126 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5127 //  h     ^        |   in   |  5
5128 //        |        |  args  |  4   Holes in incoming args owned by SELF
5129 //  |     |        |        |  3
5130 //  |     |        +--------+
5131 //  V     |        | old out|      Empty on Intel, window on Sparc
5132 //        |    old |preserve|      Must be even aligned.
5133 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5134 //        |        |   in   |  3   area for Intel ret address
5135 //     Owned by    |preserve|      Empty on Sparc.
5136 //       SELF      +--------+
5137 //        |        |  pad2  |  2   pad to align old SP
5138 //        |        +--------+  1
5139 //        |        | locks  |  0
5140 //        |        +--------+----> OptoReg::stack0(), even aligned
5141 //        |        |  pad1  | 11   pad to align new SP
5142 //        |        +--------+
5143 //        |        |        | 10
5144 //        |        | spills |  9   spills
5145 //        V        |        |  8   (pad0 slot for callee)
5146 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5147 //        ^        |  out   |  7
5148 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5149 //     Owned by    +--------+
5150 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5151 //        |    new |preserve|      Must be even-aligned.
5152 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5153 //        |        |        |
5154 //
5155 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5156 //         known from SELF's arguments and the Java calling convention.
5157 //         Region 6-7 is determined per call site.
5158 // Note 2: If the calling convention leaves holes in the incoming argument
5159 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
5161 //         incoming area, as the Java calling convention is completely under
5162 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5164 //         varargs C calling conventions.
5165 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5166 //         even aligned with pad0 as needed.
5167 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5168 //           (the latter is true on Intel but is it false on AArch64?)
5169 //         region 6-11 is even aligned; it may be padded out more so that
5170 //         the region from SP to FP meets the minimum stack alignment.
5171 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5172 //         alignment.  Region 11, pad1, may be dynamically extended so that
5173 //         SP meets the minimum alignment.
5174 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    // NOTE: the tables below carry Op_RegN rows even though the assert
    // currently excludes Op_RegN (it is below Op_RegI in the opcode order).
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal register opcode.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the return register pair; OptoReg::Bad for 32-bit values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5278 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute (default operand cost;
                             // individual operands override with op_cost(0))

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5296 
5297 //----------OPERANDS-----------------------------------------------------------
5298 // Operand definitions must precede instruction definitions for correct parsing
5299 // in the ADLC because operands constitute user defined types which are used in
5300 // instruction definitions.
5301 
5302 //----------Simple Operands----------------------------------------------------
5303 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift values for add/sub extension shift
operand immIExt()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4 (note: no lower bound is checked)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5478 
// 64 bit constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5508 
// 64 bit mask of contiguous low-order one bits (value + 1 is a power of
// two and the top two bits are clear, so at most 62 ones).
operand immL_bitmask()
%{
  // Use an uppercase 'L' suffix: a lowercase 'l' is easily misread as '1'.
  predicate(((n->get_long() & 0xc000000000000000L) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5519 
// 32 bit mask of contiguous low-order one bits (value + 1 is a power of
// two and the top two bits are clear, so at most 30 ones).
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5530 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long variant)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5584 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 4-byte-wide (shift 2) immediate loads and stores
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 8-byte-wide (shift 3) immediate loads and stores
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for 16-byte-wide (shift 4) immediate loads and stores
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variant of immIOffset
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variant of immIOffset4
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variant of immIOffset8
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variant of immIOffset16
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5665 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5774 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment mirrors immP_M1 -- confirm the -2 marker semantics
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5856 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant encodable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant encodable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5917 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5948 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its siblings this operand omits op_cost(0) and so
// inherits the default op_cost(1) from op_attrib -- confirm this is intended.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
5992 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6109 
// Fixed-register long operands: allocation is pinned to a single
// general purpose register (r0/r2/r3/r11).

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6153 
// Pointer 64 bit Register FP only
// n.b. unlike the other fixed pointer operands this does not also
// match iRegPNoSp, so it is never substituted for a generic pointer.
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6164 
// Fixed-register 32 bit int operands: allocation is pinned to the
// 32 bit view of a single general purpose register (r0/r2/r3/r4).

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6210 
// Pointer Register Operands
// Narrow Pointer Register -- holds a compressed (32 bit) oop/klass
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to register R0 only
operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to register R2 only
operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer pinned to register R3 only
operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow (32 bit) Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6259 
// heap base register -- used for encoding immN0
// constrained to the dedicated heapbase register class
operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6270 
// Float Register
// Float register operands (single precision, FP/SIMD register file)
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands (double precision, FP/SIMD register file)
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit (D-sized) vector operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit (X/Q-sized) vector operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6314 
// Fixed-register double operands: allocation is pinned to one
// specific FP/SIMD register (v0..v3).
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6350 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6390 
// Special Registers

// Method Register -- wraps a pointer operand constrained to the
// inline cache register class
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter method oop register -- same register class as the
// inline cache register
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6432 
//----------Memory Operands----------------------------------------------------
// These describe the addressing modes accepted by load/store rules.
// index(0xffffffff) in the MEMORY_INTER sections means "no index
// register".

// Plain register-indirect address: [reg]
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + (sign-extended 32 bit index << scale); the predicate only
// admits the form when the scaled access size suits all memory users
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (64 bit index << scale)
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + sign-extended 32 bit index (no scaling)
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + 64 bit index (no scaling)
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
6506 
// base + immediate-offset addressing modes: [reg, #off]. The I4/I8/I16
// (and L4/L8/L16) variants restrict the immediate to offsets suitable
// for 4/8/16 byte accesses.

operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Same shapes with a long (64 bit) immediate offset
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6618 
// Narrow-oop addressing modes. These match a DecodeN of a compressed
// base and are only legal when the narrow oop shift is zero, so the
// compressed register can be used directly as the base.

operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + (sign-extended 32 bit index << scale)
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + (64 bit index << scale)
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + sign-extended 32 bit index
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + 64 bit index
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + 32 bit immediate offset
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + 64 bit immediate offset
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6723 
6724 
6725 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// address = thread register + fixed pc-slot offset
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6740 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory. The base encoding 0x1e selects
//                      the stack pointer and the stack offset is carried in
//                      the displacement.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6815 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// encodings are AArch64 condition codes (eq/ne/lt/ge/le/gt/vs/vc)

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6852 
// used for unsigned integral comparisons
// same as cmpOp but with the unsigned condition codes (lo/hs/ls/hi)

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6871 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// the predicate restricts the Bool test to eq/ne only

operand cmpOpEqNe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6895 
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
// the predicate restricts the Bool test to lt/ge only

operand cmpOpLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6920 
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
// the predicate admits eq/ne/lt/ge tests

operand cmpOpUEqNeLtGe()
%{
  match(Bool);
  match(CmpOp);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq
            || n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6947 
// Special operand allowing long args to int ops to be truncated for free
// matches a ConvL2I of a long register so 32 bit instructions can
// consume the low half of the long directly

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // terminate with ';' for consistency with every other REG_INTER
  // operand in this file
  interface(REG_INTER);
%}
6960 
// vector memory operand classes grouped by access size (4/8/16 bytes)
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6964 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6992 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// map symbolic A53-style stage names onto the generic S0..S3 stages
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
7002 
7003 // Integer ALU reg operation
7004 pipeline %{
7005 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size (4 byte) instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
7018 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
// INS01 / ALU are composite resources: either of the two underlying
// units may satisfy a request

resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
7039 
7040 //----------PIPELINE CLASSES---------------------------------------------------
7041 // Pipeline Classes describe the stages in which input and output are
7042 // referenced by the hardware pipeline.
7043 
// FP dyadic op, single precision: reads src1/src2 early, writes dst at S5
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> float conversion
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double conversion
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
7099 
// FP <-> integer conversions: all single-instruction, src read at S1,
// dst written at S5, issuing on either issue port

pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
7171 
// FP division: restricted to issue port INS0 only

pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
7191 
// FP conditional select: also reads the flags register, result at S3

pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate: no register inputs, result at S3

pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load: result at S4

pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
7245 
// vector multiply / multiply-accumulate / dyadic-op classes.
// the 64 bit (vecD) forms may issue on either port (INS01); the
// 128 bit (vecX) forms are restricted to INS0. the vmla forms also
// read dst as an accumulator input.

pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}
7307 
// vector logical and shift classes: result available at S3. the
// *_imm forms take an immediate shift so only src is a register read.

pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
7365 
// Vector FP dual-operand op, 64-bit operands; result at S5.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dual-operand op, 128-bit operands; INS0 issue only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit operands.
// NOTE(review): uses INS0 even for the 64-bit form, unlike most
// 64-bit vector classes above which use INS01 -- presumably deliberate.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit operands; INS0 issue only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit operands; INS0 issue only.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7414 
// Vector FP unary op, 64-bit operand.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit operand; INS0 issue only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate general register into 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate general register into 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate float register into 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate double register into 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit destination (no source operands).
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit destination; INS0 issue only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7493 
// Vector load, 64-bit destination; address consumed at issue.
// NOTE(review): dst written at S5 while NEON_FP is held at S3 --
// presumably modelling load-to-use latency; confirm against the
// original scheduling model before changing.
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit destination.
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit source; address at issue, data read at S2.
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7520 
// Vector store, 128-bit source; address at issue, data read at S2.
// Fix: the source operand was declared vecD (copy-paste from the
// 64-bit variant); the 128-bit form takes a vecX, matching
// vload_reg_mem128 and the rest of the *64/*128 class pairs.
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7529 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written at EX2 but the ALU resource is held
// only to EX1 -- confirm against the pipeline model if touched.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7627 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7692 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7771 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
// Address is consumed at issue; the data register is read at EX2.
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Here "dst" is the address register (read, not written).
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7839 
//------- Branch pipeline operations ----------------------
7841 
// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7932 
7933 %}
7934 //----------INSTRUCTIONS-------------------------------------------------------
7935 //
7936 // match      -- States which machine-independent subtree may be replaced
7937 //               by this instruction.
7938 // ins_cost   -- The estimated cost of this instruction is used by instruction
7939 //               selection to identify a minimum cost tree of machine
7940 //               instructions that matches a tree of machine-independent
7941 //               instructions.
7942 // format     -- A string providing the disassembly for this instruction.
7943 //               The value of an instruction's operand may be inserted
7944 //               by referring to it with a '$' prefix.
7945 // opcode     -- Three instruction opcodes may be provided.  These are referred
7946 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7948 //               indicate the type of machine instruction, while secondary
7949 //               and tertiary are often used for prefix options or addressing
7950 //               modes.
7951 // ins_encode -- A list of encode classes with parameters. The encode class
7952 //               name must have been defined in an 'enc_class' specification
7953 //               in the encode section of the architecture description.
7954 
7955 // ============================================================================
7956 // Memory (Load/Store) Instructions
7957 
7958 // Load Instructions
7959 
7960 // Load Byte (8 bit signed)
// Load Byte (8 bit signed)
// Only matches plain loads; acquiring loads are excluded by the
// predicate (and matched by separate rules).
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
// The predicate inspects n->in(1): the LoadB underneath the ConvI2L.
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8015 
8016 // Load Short (16 bit signed)
8017 instruct loadS(iRegINoSp dst, memory mem)
8018 %{
8019   match(Set dst (LoadS mem));
8020   predicate(!needs_acquiring_load(n));
8021 
8022   ins_cost(4 * INSN_COST);
8023   format %{ "ldrshw  $dst, $mem\t# short" %}
8024 
8025   ins_encode(aarch64_enc_ldrshw(dst, mem));
8026 
8027   ins_pipe(iload_reg_mem);
8028 %}
8029 
8030 // Load Short (16 bit signed) into long
8031 instruct loadS2L(iRegLNoSp dst, memory mem)
8032 %{
8033   match(Set dst (ConvI2L (LoadS mem)));
8034   predicate(!needs_acquiring_load(n->in(1)));
8035 
8036   ins_cost(4 * INSN_COST);
8037   format %{ "ldrsh  $dst, $mem\t# short" %}
8038 
8039   ins_encode(aarch64_enc_ldrsh(dst, mem));
8040 
8041   ins_pipe(iload_reg_mem);
8042 %}
8043 
8044 // Load Char (16 bit unsigned)
8045 instruct loadUS(iRegINoSp dst, memory mem)
8046 %{
8047   match(Set dst (LoadUS mem));
8048   predicate(!needs_acquiring_load(n));
8049 
8050   ins_cost(4 * INSN_COST);
8051   format %{ "ldrh  $dst, $mem\t# short" %}
8052 
8053   ins_encode(aarch64_enc_ldrh(dst, mem));
8054 
8055   ins_pipe(iload_reg_mem);
8056 %}
8057 
8058 // Load Short/Char (16 bit unsigned) into long
8059 instruct loadUS2L(iRegLNoSp dst, memory mem)
8060 %{
8061   match(Set dst (ConvI2L (LoadUS mem)));
8062   predicate(!needs_acquiring_load(n->in(1)));
8063 
8064   ins_cost(4 * INSN_COST);
8065   format %{ "ldrh  $dst, $mem\t# short" %}
8066 
8067   ins_encode(aarch64_enc_ldrh(dst, mem));
8068 
8069   ins_pipe(iload_reg_mem);
8070 %}
8071 
8072 // Load Integer (32 bit signed)
8073 instruct loadI(iRegINoSp dst, memory mem)
8074 %{
8075   match(Set dst (LoadI mem));
8076   predicate(!needs_acquiring_load(n));
8077 
8078   ins_cost(4 * INSN_COST);
8079   format %{ "ldrw  $dst, $mem\t# int" %}
8080 
8081   ins_encode(aarch64_enc_ldrw(dst, mem));
8082 
8083   ins_pipe(iload_reg_mem);
8084 %}
8085 
8086 // Load Integer (32 bit signed) into long
8087 instruct loadI2L(iRegLNoSp dst, memory mem)
8088 %{
8089   match(Set dst (ConvI2L (LoadI mem)));
8090   predicate(!needs_acquiring_load(n->in(1)));
8091 
8092   ins_cost(4 * INSN_COST);
8093   format %{ "ldrsw  $dst, $mem\t# int" %}
8094 
8095   ins_encode(aarch64_enc_ldrsw(dst, mem));
8096 
8097   ins_pipe(iload_reg_mem);
8098 %}
8099 
8100 // Load Integer (32 bit unsigned) into long
8101 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
8102 %{
8103   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
8104   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
8105 
8106   ins_cost(4 * INSN_COST);
8107   format %{ "ldrw  $dst, $mem\t# int" %}
8108 
8109   ins_encode(aarch64_enc_ldrw(dst, mem));
8110 
8111   ins_pipe(iload_reg_mem);
8112 %}
8113 
8114 // Load Long (64 bit signed)
8115 instruct loadL(iRegLNoSp dst, memory mem)
8116 %{
8117   match(Set dst (LoadL mem));
8118   predicate(!needs_acquiring_load(n));
8119 
8120   ins_cost(4 * INSN_COST);
8121   format %{ "ldr  $dst, $mem\t# int" %}
8122 
8123   ins_encode(aarch64_enc_ldr(dst, mem));
8124 
8125   ins_pipe(iload_reg_mem);
8126 %}
8127 
// Load Range
// Array length load; no acquiring-load predicate needed (range is
// immutable once the array is allocated).
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
8196 
// Load Float
// Uses the generic pipe_class_memory rather than iload_reg_mem,
// as the destination is an FP register.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
8224 
8225 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
8251 
// Load Pointer Constant
// Higher cost than the zero/one special cases below: a general
// pointer may need a multi-instruction materialization.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
8281 
8282 // Load Pointer Constant One
8283 
// Loads pointer constant one (immP_1), not NULL.
// Fix: format string said "# NULL ptr" (copy-paste from loadConP0);
// format text only affects -XX:+PrintOptoAssembly output.
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
8295 
// Load Poll Page Constant
// Materialized with ADR (PC-relative) rather than MOV.

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant
// Card-table byte map base, also ADR-materialized.

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8365 
// Load Packed Float Constant
// "Packed" immediates fit FMOV's 8-bit encoding; materialized directly
// instead of via the constant table.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: load from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
8396 
// Load Packed Double Constant
// Double immediates that fit FMOV's 8-bit encoding.

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8409 
8410 // Load Double Constant
8411 
// Load Double Constant from the constant table.
// Fix: format annotation said "float=$con" (copy-paste from loadConF);
// format text only affects -XX:+PrintOptoAssembly output.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8426 
8427 // Store Instructions
8428 
// Store CMS card-mark Immediate
// The StoreStore barrier is elided when the predicate proves it
// unnecessary; only the byte store remains.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "storestore (elided)\n\t"
            "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "storestore\n\t"
            "dmb ishst"
            "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
8459 
// Store Byte
// Only matches plain stores; releasing stores are excluded by the
// predicate (and matched by separate rules).
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8473 
8474 
// Store Byte zero.
// Fix: format string said "strb rscractch2" (typo, and wrong register):
// the encoding aarch64_enc_strb0 stores zr, as the other storeimm*0
// forms print. Format text only affects -XX:+PrintOptoAssembly output.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8487 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short zero
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer zero
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8542 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed) zero
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store NULL Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8598 
8599 // Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  // Plain store only; releasing stores are matched by other rules.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Compressed oops are 32 bits, hence the word-sized strw.
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8612 
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  // Only legal when both the narrow-oop and narrow-klass bases are NULL:
  // then rheapbase holds zero (see format below) and can be stored
  // directly as the compressed null.
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8627 
8628 // Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  // Plain store only; releasing stores are matched by other rules.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8641 
8642 // TODO
8643 // implement storeImmF0 and storeFImmPacked
8644 
8645 // Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  // Plain store only; releasing stores are matched by other rules.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8658 
8659 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // Consistency fix: match before predicate, like every other
  // plain-store rule in this section.
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Compressed klass pointers are 32 bits, hence the word-sized strw.
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8672 
8673 // TODO
8674 // implement storeImmD0 and storeDImmPacked
8675 
8676 // prefetch instructions
8677 // Must be safe to execute with invalid address (cannot fault).
8678 
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8689 
8690 //  ---------------- volatile loads and stores ----------------
8691 
8692 // Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile variant: load-acquire (ldarsb) instead of a plain load.
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Fused with the ConvI2L: ldarsb sign-extends to the full register.
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
8717 
8718 // Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile variant: load-acquire (ldarb) zero-extends the byte.
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Fused with the ConvI2L: ldarb already zero-extends to 64 bits.
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
8743 
8744 // Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile variant: load-acquire (ldarshw) sign-extends the halfword.
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char/Short (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile variant: load-acquire (ldarhw) zero-extends the halfword.
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
8768 
8769 // Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Fused with the ConvI2L: ldarh already zero-extends to 64 bits.
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8781 
8782 // Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Fused with the ConvI2L: ldarsh sign-extends to the full register.
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed debug listing: the encoding emits ldarsh (sign-extending),
  // not ldarh as the format previously claimed.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8794 
8795 // Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile variant: load-acquire (ldarw) instead of a plain load.
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  // Matches the zero-extension idiom (ConvI2L masked to 32 bits);
  // ldarw already clears the upper 32 bits, so no mask is emitted.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8820 
8821 // Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile variant: load-acquire (ldar) instead of a plain load.
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed debug comment: this is a 64-bit long load, not an int load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8833 
8834 // Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  // Volatile variant: load-acquire (ldar) instead of a plain load.
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  // Compressed oops are 32 bits, hence the word-sized ldarw.
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8859 
8860 // Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  // Volatile float load with acquire semantics.
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  // Volatile double load with acquire semantics.
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8885 
8886 // Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Volatile variant: store-release (stlrb) instead of a plain store.
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Volatile variant: store-release (stlrh) instead of a plain store.
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
8911 
8912 // Store Integer
8913 
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  // Volatile variant: store-release (stlrw) instead of a plain store.
  // (Consistency fix: space after "mem", matching the sibling rules.)
  match(Set mem (StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8925 
8926 // Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  // Volatile variant: store-release (stlr) instead of a plain store.
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed debug comment: this is a 64-bit long store, not an int store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8938 
8939 // Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  // Volatile variant: store-release (stlr) instead of a plain store.
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  // Compressed oops are 32 bits, hence the word-sized stlrw.
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8964 
8965 // Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  // Volatile float store with release semantics.
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  // Volatile double store with release semantics.
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8993 
8994 //  ---------------- end of volatile loads and stores ----------------
8995 
8996 // ============================================================================
8997 // BSWAP Instructions
8998 
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    // Byte-reverse the 32-bit value.
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    // Byte-reverse the 64-bit value.
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    // Swap the two bytes of the low halfword; no extension needed for
    // the unsigned (char) case.
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    // Swap the two bytes, then sign-extend bits [15:0] to produce a
    // proper int-valued short.
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
9052 
9053 // ============================================================================
9054 // Zero Count Instructions
9055 
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    // No trailing-zero-count instruction: bit-reverse, then count
    // leading zeros.
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    // Same rbit+clz idiom as the 32-bit rule, on the full register.
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9107 
9108 //---------- Population Count Instructions -------------------------------------
9109 //
9110 
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  // No scalar popcount: move to a vector register, use the SIMD cnt
  // (per-byte popcount) and addv (horizontal sum), then move back.
  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src (movw src,src) but declares no
    // effect on src -- confirm the allocator tolerates this.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  // Memory form: load the 32-bit value straight into the vector
  // register, then the same cnt/addv sequence as popCountI.
  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  // Same SIMD cnt/addv idiom as popCountI, on the full 64-bit value.
  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  // Memory form: 64-bit load into the vector register, then cnt/addv.
  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
9197 
9198 // ============================================================================
9199 // MemBar Instruction
9200 
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    // Orders prior loads against later loads and stores.
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct unnecessary_membar_acquire() %{
  // Matched only when unnecessary_acquire(n) proves the ordering is
  // already provided (e.g. by a load-acquire); emits nothing.
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


instruct membar_acquire_lock() %{
  // Lock entry already provides the required ordering; emits nothing.
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    // Orders prior loads and stores against later stores.
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
9267 
instruct unnecessary_membar_release() %{
  // Matched only when unnecessary_release(n) proves the ordering is
  // already provided (e.g. by a store-release); emits nothing.
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_release");
    // Orders prior loads and stores against later stores.
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release_lock() %{
  // Lock exit already provides the required ordering; emits nothing.
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

instruct unnecessary_membar_volatile() %{
  // Matched only when unnecessary_volatile(n) proves the trailing
  // barrier is redundant; emits nothing.
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
9333 
instruct membar_volatile() %{
  match(MemBarVolatile);
  // Deliberately expensive: strongly discourage the matcher from
  // choosing a full barrier when an elided/weaker form applies.
  ins_cost(VOLATILE_REF_COST * 100);

  // (Formatting fix: space before %} as in the sibling membar rules.)
  format %{ "membar_volatile\n\t"
            "dmb ish" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    // Full StoreLoad barrier ("dmb ish" per the format above).
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
9348 
9349 // ============================================================================
9350 // Cast/Convert Instructions
9351 
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    // Pure reinterpretation; skip the mov when already in place.
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    // Pure reinterpretation; skip the mov when already in place.
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
9381 
9382 // Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    // movw keeps the low 32 bits and clears the upper half.
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9394 
9395 // Convert compressed oop into int for vectors alignment masking
9396 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed debug listing: operand needs the '$' prefix and the emitted
  // instruction is movw, matching the encoding below.
  format %{ "movw  $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    // With a zero narrow-oop shift, the compressed bits are the low
    // 32 bits of the decoded pointer, so a 32-bit move suffices.
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9410 
instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{
  match(Set dst (ShenandoahReadBarrier src));
  format %{ "shenandoah_rb $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // Read barrier: load the Brooks forwarding pointer at
    // BrooksPointer::byte_offset() from the oop.
    // NOTE(review): unlike shenandoahWB below, this encoding is not
    // guarded by INCLUDE_SHENANDOAHGC -- confirm it cannot be reached
    // in builds without Shenandoah.
    __ ldr(d, Address(s, BrooksPointer::byte_offset()));
  %}
  ins_pipe(pipe_class_memory);
%}
9421 
instruct shenandoahWB(iRegP_R0 dst, iRegP src, rFlagsReg cr) %{
  // Result is pinned to r0 (iRegP_R0) because the slow path is a call.
  match(Set dst (ShenandoahWriteBarrier src));
  effect(KILL cr);

  format %{ "shenandoah_wb $dst,$src" %}
  ins_encode %{
#if INCLUDE_SHENANDOAHGC
    Label done;
    Register s = $src$$Register;
    Register d = $dst$$Register;
    assert(d == r0, "result in r0");
    __ block_comment("Shenandoah write barrier {");
    // We need that first read barrier in order to trigger a SEGV/NPE on incoming NULL.
    // Also, it brings s into d in preparation for the call to shenandoah_write_barrier().
    __ ldr(d, Address(s, BrooksPointer::byte_offset()));
    __ shenandoah_write_barrier(d);
    __ block_comment("} Shenandoah write barrier");
#else
    ShouldNotReachHere();
#endif
  %}
  ins_pipe(pipe_slow);
%}
9445 
9446 
9447 // Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // General case: the oop may be NULL, so the encoding must preserve it.
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  // Oop is statically known non-NULL; the NULL check can be skipped.
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  // General case: the narrow oop may be NULL.
  // NOTE(review): cr is a parameter but no KILL effect is declared,
  // unlike encodeHeapOop above -- confirm decode_heap_oop leaves the
  // flags untouched.
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  // Narrow oop is statically known non-NULL (or a constant).
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9500 
9501 // n.b. AArch64 implementations of encode_klass_not_null and
9502 // decode_klass_not_null do not modify the flags register so, unlike
9503 // Intel, we don't kill CR as a side effect here
9504 
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The single-register overload is used for the in-place case.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9538 
// The cast nodes below are compile-time type assertions only; they
// generate no code (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9569 
9570 // ============================================================================
9571 // Atomic operation instructions
9572 //
9573 // Intel and SPARC both implement Ideal Node LoadPLocked and
9574 // Store{PIL}Conditional instructions using a normal load for the
9575 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9576 //
9577 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9578 // pair to lock object allocations from Eden space when not using
9579 // TLABs.
9580 //
9581 // There does not appear to be a Load{IL}Locked Ideal Node and the
9582 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9583 // and to use StoreIConditional only for 32-bit and StoreLConditional
9584 // only for 64-bit.
9585 //
9586 // We implement LoadPLocked and StorePLocked instructions using,
9587 // respectively the AArch64 hw load-exclusive and store-conditional
9588 // instructions. Whereas we must implement each of
9589 // Store{IL}Conditional using a CAS which employs a pair of
9590 // instructions comprising a load-exclusive followed by a
9591 // store-conditional.
9592 
9593 
9594 // Locked-load (linked load) of the current heap-top
9595 // used when updating the eden heap top
9596 // implemented using ldaxr on AArch64
9597 
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  // Exclusive load-acquire; pairs with the stlxr in storePConditional.
  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
9610 
9611 // Conditional-store of the updated heap-top.
9612 // Used during allocation of the shared heap.
9613 // Sets flag (EQ) on success.
9614 // implemented using stlxr on AArch64.
9615 
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  // NOTE(review): the two format strings below concatenate with no
  // "\n\t" separator, so the debug listing runs them together --
  // confirm whether a separator is intended as in other rules.
  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
9635 
9636 
9637 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
9638 // when attempting to rebias a lock towards the current thread.  We
9639 // must use the acquire form of cmpxchg in order to guarantee acquire
9640 // semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  // Implemented as an acquiring CAS (see section comment above); the
  // flags result (EQ on success) is the node's output.
  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9656 
9657 // storeIConditional also has acquire semantics, for no better reason
9658 // than matching storeLConditional.  At the time of writing this
9659 // comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  // 32-bit acquiring CAS, mirroring storeLConditional above.
  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9675 
9676 // standard CompareAndSwapX when we are using barriers
9677 // these have higher priority than the rules selected by a predicate
9678 
9679 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9680 // can't match them
9681 
// Strong CAS of a byte.  $res <- 1 on success, 0 on failure (via
// cset on EQ); flags are clobbered.  No predicate, so this is the
// unconditional match for CompareAndSwapB.
// NOTE(review): no *Acq variant of the byte CAS is visible in this
// chunk, unlike I/L/P/N — confirm acquire handling for byte CAS.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9699 
// Strong CAS of a short (halfword).  $res <- 1 on success, 0 on
// failure; flags clobbered.  Unconditional match for CompareAndSwapS.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9717 
// Strong CAS of an int.  $res <- 1 on success, 0 on failure; flags
// clobbered.  The acquiring form (compareAndSwapIAcq, below) has a
// predicate and lower cost, so it wins when acquire semantics apply.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9735 
// Strong CAS of a long.  $res <- 1 on success, 0 on failure; flags
// clobbered.  See compareAndSwapLAcq below for the acquiring form.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9753 
// Strong CAS of a pointer, non-Shenandoah path.  The predicate also
// admits the Shenandoah case when the operand at n->in(3)->in(1) is
// statically NULL — presumably the compare value, so no resolve
// barrier is needed; NOTE(review): confirm which Binary input that is.
// $res <- 1 on success, 0 on failure; flags clobbered.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
// Strong CAS of a pointer with the Shenandoah CAS barrier; uses the
// Shenandoah-specific encoding and a TEMP register.  Higher cost
// (3 * VOLATILE_REF_COST) than the plain rule so the plain rule is
// preferred when its predicate allows.  $res <- 1/0; flags clobbered.
instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

  predicate(UseShenandoahGC && ShenandoahCASBarrier);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(3 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_oop_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_oop_shenandoah(mem, oldval, newval, tmp),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9790 
// Strong CAS of a narrow oop, non-Shenandoah path (predicate excludes
// the Shenandoah CAS-barrier configuration, handled by the rule
// below).  $res <- 1 on success, 0 on failure; flags clobbered.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(!UseShenandoahGC || !ShenandoahCASBarrier);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9809 
// Strong CAS of a narrow oop with the Shenandoah CAS barrier.
// Inline encoding: oldval is first copied into tmp because
// cmpxchg_oop updates its compare register and oldval must survive.
// Release-only ordering (/*acquire*/ false, /*release*/ true);
// $res <- 1/0 via cset on EQ; flags clobbered.
instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{

  predicate(UseShenandoahGC && ShenandoahCASBarrier);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(3 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_narrow_oop_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    __ cmpxchg_oop($mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
    __ cset($res$$Register, Assembler::EQ);
  %}

  ins_pipe(pipe_slow);
%}
9832 
9833 // alternative CompareAndSwapX when we are eliding barriers
9834 
// Acquiring strong CAS of an int: selected (via predicate and lower
// cost VOLATILE_REF_COST) when the node needs an acquiring load
// exclusive, eliding a separate barrier.  $res <- 1/0; flags clobbered.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9853 
// Acquiring strong CAS of a long; see compareAndSwapIAcq for the
// predicate/cost selection scheme.  $res <- 1/0; flags clobbered.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9872 
// Acquiring strong CAS of a pointer, non-Shenandoah path.  Predicate
// combines the acquire condition with the same Shenandoah/NULL escape
// used by compareAndSwapP above.  $res <- 1/0; flags clobbered.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9891 
// Acquiring strong CAS of a pointer with the Shenandoah CAS barrier;
// acquiring counterpart of compareAndSwapP_shenandoah.  $res <- 1/0;
// flags clobbered; tmp is a scratch register for the barrier encoding.
instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && UseShenandoahGC && ShenandoahCASBarrier);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_acq_oop_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq_oop_shenandoah(mem, oldval, newval, tmp),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9910 
// Acquiring strong CAS of a narrow oop, non-Shenandoah path.
// $res <- 1/0; flags clobbered.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (!UseShenandoahGC || !ShenandoahCASBarrier));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9929 
// Acquiring strong CAS of a narrow oop with the Shenandoah CAS
// barrier.  Same inline encoding as compareAndSwapN_shenandoah but
// with /*acquire*/ true; oldval is preserved by copying into tmp.
// $res <- 1/0 via cset on EQ; flags clobbered.
instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && UseShenandoahGC && ShenandoahCASBarrier);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(3 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

 format %{
    "cmpxchg_narrow_oop_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    __ cmpxchg_oop($mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
    __ cset($res$$Register, Assembler::EQ);
  %}

  ins_pipe(pipe_slow);
%}
9952 
9953 // ---------------------------------------------------------------------
9954 
9955 
9956 // BEGIN This section of the file is automatically generated. Do not edit --------------
9957 
9958 // Sundry CAS operations.  Note that release is always true,
9959 // regardless of the memory ordering of the CAS.  This is because we
9960 // need the volatile case to be sequentially consistent but there is
9961 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
9962 // can't check the type of memory ordering here, so we always emit a
9963 // STLXR.
9964 
9965 // This section is generated from aarch64_ad_cas.m4
9966 
9967 
9968 
9969 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
9970   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
9971   ins_cost(2 * VOLATILE_REF_COST);
9972   effect(TEMP_DEF res, KILL cr);
9973   format %{
9974     "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
9975   %}
9976   ins_encode %{
9977     __ uxtbw(rscratch2, $oldval$$Register);
9978     __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
9979                Assembler::byte, /*acquire*/ false, /*release*/ true,
9980                /*weak*/ false, $res$$Register);
9981     __ sxtbw($res$$Register, $res$$Register);
9982   %}
9983   ins_pipe(pipe_slow);
9984 %}
9985 
9986 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
9987   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
9988   ins_cost(2 * VOLATILE_REF_COST);
9989   effect(TEMP_DEF res, KILL cr);
9990   format %{
9991     "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
9992   %}
9993   ins_encode %{
9994     __ uxthw(rscratch2, $oldval$$Register);
9995     __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
9996                Assembler::halfword, /*acquire*/ false, /*release*/ true,
9997                /*weak*/ false, $res$$Register);
9998     __ sxthw($res$$Register, $res$$Register);
9999   %}
10000   ins_pipe(pipe_slow);
10001 %}
10002 
10003 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
10004   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
10005   ins_cost(2 * VOLATILE_REF_COST);
10006   effect(TEMP_DEF res, KILL cr);
10007   format %{
10008     "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
10009   %}
10010   ins_encode %{
10011     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
10012                Assembler::word, /*acquire*/ false, /*release*/ true,
10013                /*weak*/ false, $res$$Register);
10014   %}
10015   ins_pipe(pipe_slow);
10016 %}
10017 
10018 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
10019   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
10020   ins_cost(2 * VOLATILE_REF_COST);
10021   effect(TEMP_DEF res, KILL cr);
10022   format %{
10023     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
10024   %}
10025   ins_encode %{
10026     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
10027                Assembler::xword, /*acquire*/ false, /*release*/ true,
10028                /*weak*/ false, $res$$Register);
10029   %}
10030   ins_pipe(pipe_slow);
10031 %}
10032 
10033 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
10034   predicate(!UseShenandoahGC || !ShenandoahCASBarrier);
10035   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
10036   ins_cost(2 * VOLATILE_REF_COST);
10037   effect(TEMP_DEF res, KILL cr);
10038   format %{
10039     "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
10040   %}
10041   ins_encode %{
10042     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
10043                Assembler::word, /*acquire*/ false, /*release*/ true,
10044                /*weak*/ false, $res$$Register);
10045   %}
10046   ins_pipe(pipe_slow);
10047 %}
10048 
10049 instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
10050   predicate(UseShenandoahGC && ShenandoahCASBarrier);
10051   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
10052   ins_cost(3 * VOLATILE_REF_COST);
10053   effect(TEMP_DEF res, TEMP tmp, KILL cr);
10054   format %{
10055     "cmpxchg_oop_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
10056   %}
10057   ins_encode %{
10058     Register tmp = $tmp$$Register;
10059     __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
10060     __ cmpxchg_oop($mem$$Register, tmp, $newval$$Register,
10061                    /*acquire*/ false, /*release*/ true, /*weak*/ false, /* encode*/ false, noreg, noreg, rscratch2, $res$$Register);
10062   %}
10063   ins_pipe(pipe_slow);
10064 %}
10065 
10066 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
10067   predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR);
10068   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
10069   ins_cost(2 * VOLATILE_REF_COST);
10070   effect(TEMP_DEF res, KILL cr);
10071   format %{
10072     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
10073   %}
10074   ins_encode %{
10075     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
10076                Assembler::xword, /*acquire*/ false, /*release*/ true,
10077                /*weak*/ false, $res$$Register);
10078   %}
10079   ins_pipe(pipe_slow);
10080 %}
10081 
10082 instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
10083   predicate(UseShenandoahGC && ShenandoahCASBarrier);
10084   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
10085   ins_cost(3 * VOLATILE_REF_COST);
10086   effect(TEMP_DEF res, TEMP tmp, KILL cr);
10087   format %{
10088     "cmpxchg_oop_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
10089   %}
10090   ins_encode %{
10091     Register tmp = $tmp$$Register;
10092     __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
10093     __ cmpxchg_oop($mem$$Register, tmp, $newval$$Register,
10094                    /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
10095   %}
10096   ins_pipe(pipe_slow);
10097 %}
10098 
10099 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
10100   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
10101   ins_cost(2 * VOLATILE_REF_COST);
10102   effect(KILL cr);
10103   format %{
10104     "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
10105     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
10106   %}
10107   ins_encode %{
10108     __ uxtbw(rscratch2, $oldval$$Register);
10109     __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
10110                Assembler::byte, /*acquire*/ false, /*release*/ true,
10111                /*weak*/ true, noreg);
10112     __ csetw($res$$Register, Assembler::EQ);
10113   %}
10114   ins_pipe(pipe_slow);
10115 %}
10116 
10117 instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
10118   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
10119   ins_cost(2 * VOLATILE_REF_COST);
10120   effect(KILL cr);
10121   format %{
10122     "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
10123     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
10124   %}
10125   ins_encode %{
10126     __ uxthw(rscratch2, $oldval$$Register);
10127     __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
10128                Assembler::halfword, /*acquire*/ false, /*release*/ true,
10129                /*weak*/ true, noreg);
10130     __ csetw($res$$Register, Assembler::EQ);
10131   %}
10132   ins_pipe(pipe_slow);
10133 %}
10134 
10135 instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
10136   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
10137   ins_cost(2 * VOLATILE_REF_COST);
10138   effect(KILL cr);
10139   format %{
10140     "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
10141     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
10142   %}
10143   ins_encode %{
10144     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
10145                Assembler::word, /*acquire*/ false, /*release*/ true,
10146                /*weak*/ true, noreg);
10147     __ csetw($res$$Register, Assembler::EQ);
10148   %}
10149   ins_pipe(pipe_slow);
10150 %}
10151 
10152 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
10153   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
10154   ins_cost(2 * VOLATILE_REF_COST);
10155   effect(KILL cr);
10156   format %{
10157     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
10158     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
10159   %}
10160   ins_encode %{
10161     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
10162                Assembler::xword, /*acquire*/ false, /*release*/ true,
10163                /*weak*/ true, noreg);
10164     __ csetw($res$$Register, Assembler::EQ);
10165   %}
10166   ins_pipe(pipe_slow);
10167 %}
10168 
10169 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
10170   predicate(!UseShenandoahGC || !ShenandoahCASBarrier);
10171   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
10172   ins_cost(2 * VOLATILE_REF_COST);
10173   effect(KILL cr);
10174   format %{
10175     "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
10176     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
10177   %}
10178   ins_encode %{
10179     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
10180                Assembler::word, /*acquire*/ false, /*release*/ true,
10181                /*weak*/ true, noreg);
10182     __ csetw($res$$Register, Assembler::EQ);
10183   %}
10184   ins_pipe(pipe_slow);
10185 %}
10186 
10187 instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
10188   predicate(UseShenandoahGC && ShenandoahCASBarrier);
10189   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
10190   ins_cost(3 * VOLATILE_REF_COST);
10191   effect(TEMP tmp, KILL cr);
10192   format %{
10193     "cmpxchg_oop_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
10194     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
10195   %}
10196   ins_encode %{
10197     Register tmp = $tmp$$Register;
10198     __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
10199     __ cmpxchg_oop($mem$$Register, tmp, $newval$$Register,
10200                    /*acquire*/ false, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
10201     __ csetw($res$$Register, Assembler::EQ);
10202   %}
10203   ins_pipe(pipe_slow);
10204 %}
10205 
10206 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
10207   predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR);
10208   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
10209   ins_cost(2 * VOLATILE_REF_COST);
10210   effect(KILL cr);
10211   format %{
10212     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
10213     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
10214   %}
10215   ins_encode %{
10216     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
10217                Assembler::xword, /*acquire*/ false, /*release*/ true,
10218                /*weak*/ true, noreg);
10219     __ csetw($res$$Register, Assembler::EQ);
10220   %}
10221   ins_pipe(pipe_slow);
10222 %}
10223 
10224 instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
10225   predicate(UseShenandoahGC && ShenandoahCASBarrier);
10226   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
10227   ins_cost(3 * VOLATILE_REF_COST);
10228   effect(TEMP tmp, KILL cr);
10229   format %{
10230     "cmpxchg_oop_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
10231     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
10232   %}
10233   ins_encode %{
10234     Register tmp = $tmp$$Register;
10235     __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
10236     __ cmpxchg_oop($mem$$Register, tmp, $newval$$Register,
10237                    /*acquire*/ false, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
10238     __ csetw($res$$Register, Assembler::EQ);
10239   %}
10240   ins_pipe(pipe_slow);
10241 %}
10242 // END This section of the file is automatically generated. Do not edit --------------
10243 // ---------------------------------------------------------------------
10244 
// Atomic exchange of an int: store $newv at [$mem], yielding the
// previous contents in $prev (MacroAssembler::atomic_xchgw).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10253 
// Atomic exchange of a long (64-bit atomic_xchg); $prev receives the
// previous memory contents.
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10262 
// Atomic exchange of a narrow oop (32-bit atomic_xchgw); $prev
// receives the previous memory contents.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10271 
// Atomic exchange of a pointer (64-bit atomic_xchg); $prev receives
// the previous memory contents.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10280 
10281 
// Atomic fetch-and-add of a long with a register increment.  Despite
// the operand name "newval", GetAndAddL's result is what
// MacroAssembler::atomic_add writes to its first register — per the
// ideal GetAndAdd contract this is the value fetched from memory;
// NOTE(review): confirm against macroAssembler_aarch64.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10291 
// Atomic add of a long where the fetched value is unused
// (result_not_used predicate): destination is noreg.  Cost is one
// INSN_COST below get_and_addL so this form wins when applicable.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10302 
// Atomic fetch-and-add of a long with an immediate increment
// (immLAddSub keeps the constant encodable as an add/sub immediate).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10312 
// Immediate-increment atomic add of a long, fetched value unused
// (noreg destination); cheaper than get_and_addLi by one INSN_COST.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10323 
// Atomic fetch-and-add of an int with a register increment (32-bit
// atomic_addw); see get_and_addL for the "newval" naming caveat.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10333 
// Atomic add of an int, fetched value unused (noreg destination);
// cheaper than get_and_addI by one INSN_COST.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10344 
// Atomic fetch-and-add of an int with an immediate increment
// (immIAddSub keeps the constant encodable as an add/sub immediate).
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10354 
// Immediate-increment atomic add of an int, fetched value unused
// (noreg destination); cheaper than get_and_addIi by one INSN_COST.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
10365 
10366 // Manifest a CmpL result in an integer register.
10367 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Three-way long compare (register/register):
//   cmp sets flags; csetw dst,NE gives 0 for equal, 1 otherwise;
//   cnegw dst,LT then negates that 1 to -1 when src1 < src2.
// Net: dst = (src1 < src2) ? -1 : (src1 > src2) ? 1 : 0.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10388 
// Three-way long compare against an add/sub-encodable immediate.
// A negative constant cannot be encoded in subs directly, so the
// compare is flipped to adds with -con (immLAddSub guarantees one of
// the two forms is encodable); the cset/cneg tail matches
// cmpL3_reg_reg.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
10413 
10414 // ============================================================================
10415 // Conditional Move Instructions
10416 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
10426 
// Conditional move, int, signed compare.  Note the operand order:
// cselw dst, src2, src1, cond — so dst = cond ? src2 : src1, matching
// CMoveI's (Binary src1 src2) where src2 is the taken value.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10442 
// Conditional move, int, unsigned compare.  Identical encoding to
// cmovI_reg_reg; duplicated for cmpOpU (see the note above about why
// the two flavours can't share one opclass).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10458 
10459 // special cases where one arg is zero
10460 
10461 // n.b. this is selected in preference to the rule above because it
10462 // avoids loading constant 0 into a source register
10463 
10464 // TODO
10465 // we ought only to be able to cull one of these variants as the ideal
10466 // transforms ought always to order the zero consistently (to left/right?)
10467 
// Signed int cmove with zero on the left: dst = cmp(cr) ? src : 0.
// Uses zr for the untaken value instead of materialising 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10483 
// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10499 
// Signed int cmove with zero on the right: dst = cmp(cr) ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10515 
// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10531 
10532 // special case for creating a boolean 0 or 1
10533 
10534 // n.b. this is selected in preference to the rule above because it
10535 // avoids loading constants 0 and 1 into a source register
10536 
// Boolean materialisation: dst = cmp(cr) ? 0 : 1, i.e. csinc with both
// sources zr yields 0 when the condition passes and zr+1 otherwise.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10555 
// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
10574 
// Signed long cmove: dst = cmp(cr) ? src2 : src1 (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10590 
// Unsigned-compare flavour of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10606 
10607 // special cases where one arg is zero
10608 
// Signed long cmove with zero on the right: dst = cmp(cr) ? 0 : src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10624 
// Unsigned-compare flavour of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10640 
// Signed long cmove with zero on the left: dst = cmp(cr) ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10656 
// Unsigned-compare flavour of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10672 
// Pointer cmove: dst = cmp(cr) ? src2 : src1 (64-bit csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10688 
// Unsigned-compare flavour of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10704 
10705 // special cases where one arg is zero
10706 
// Pointer cmove with null on the right: dst = cmp(cr) ? 0 : src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10722 
// Unsigned-compare flavour of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10738 
// Pointer cmove with null on the left: dst = cmp(cr) ? src : 0.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10754 
// Unsigned-compare flavour of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10770 
// Compressed-pointer (narrow oop) cmove: dst = cmp(cr) ? src2 : src1,
// 32-bit cselw since narrow oops are 32 bits.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10786 
// Unsigned-compare flavour of cmovN_reg_reg: dst = cmp(cr) ? src2 : src1.
// Fix: the format comment previously said "signed"; this is the cmpOpU
// (unsigned) rule, consistent with every other U-variant in this file.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10802 
10803 // special cases where one arg is zero
10804 
// Narrow-oop cmove with zero on the right: dst = cmp(cr) ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10820 
// Unsigned-compare flavour of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10836 
// Narrow-oop cmove with zero on the left: dst = cmp(cr) ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10852 
// Unsigned-compare flavour of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10868 
// Float cmove: dst = cmp(cr) ? src2 : src1 using fcsel (single
// precision).  fcsel's first source is taken when the condition holds.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10886 
// Unsigned-compare flavour of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10904 
// Double cmove: dst = cmp(cr) ? src2 : src1 using fcsel (double
// precision).
// Fix: the format comment previously said "cmove float" although this is
// the CMoveD/fcseld (double) rule.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10922 
// Unsigned-compare flavour of cmovD_reg.
// Fix: the format comment previously said "cmove float" although this is
// the CMoveD/fcseld (double) rule.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10940 
10941 // ============================================================================
10942 // Arithmetic Instructions
10943 //
10944 
10945 // Integer Addition
10946 
10947 // TODO
10948 // these currently employ operations which do not set CR and hence are
10949 // not flagged as killing CR but we would like to isolate the cases
10950 // where we want to set flags from those where we don't. need to work
10951 // out how to do that.
10952 
// 32-bit integer add: dst = src1 + src2.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10967 
// 32-bit add of an add/sub-encodable immediate: dst = src1 + src2.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10981 
// As addI_reg_imm, but src1 arrives as the low word of a long
// (ConvL2I is a no-op on the register bits here).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10995 
10996 // Pointer Addition
// Pointer add: dst = src1 + src2 (64-bit).
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11011 
// Pointer add of a sign-extended int offset, folding the ConvI2L into
// the add's sxtw extend.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
11026 
// Pointer add with shifted index: dst = src1 + (src2 << scale), via lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11041 
// Pointer add with sign-extended, shifted int index:
// dst = src1 + (sxtw(src2) << scale), via lea with an sxtw address mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11056 
// (long)src << scale folded into a single sbfiz (sign-extend then
// shift).  Width is capped at 32 bits since the source is an int.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
11071 
11072 // Pointer Immediate Addition
11073 // n.b. this needs to be more expensive than using an indirect memory
11074 // operand
// Pointer add of an add/sub-encodable immediate: dst = src1 + src2.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
11088 
11089 // Long Addition
// 64-bit integer add: dst = src1 + src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11105 
// Long Immediate Addition. No constant pool entries required.
// 64-bit add of an add/sub-encodable immediate: dst = src1 + src2.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
11120 
11121 // Integer Subtraction
// 32-bit integer subtract: dst = src1 - src2.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11136 
11137 // Immediate Subtraction
// 32-bit subtract of an add/sub-encodable immediate: dst = src1 - src2.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
11151 
11152 // Long Subtraction
// 64-bit integer subtract: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11168 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit subtract of an add/sub-encodable immediate: dst = src1 - src2.
// Fix: the format string previously read "sub$dst" (missing space after
// the mnemonic), garbling disassembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
11183 
11184 // Integer Negation (special case for sub)
11185 
// 32-bit negate (special case of 0 - src): dst = -src.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
11199 
11200 // Long Negation
11201 
// 64-bit negate (special case of 0 - src): dst = -src.
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
11215 
11216 // Integer Multiply
11217 
// 32-bit multiply: dst = src1 * src2.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
11232 
// Widening 32x32->64 signed multiply: dst = (long)src1 * (long)src2,
// folding the two ConvI2L conversions into smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
11247 
11248 // Long Multiply
11249 
// 64-bit multiply: dst = src1 * src2.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11264 
// High 64 bits of the signed 64x64->128 product: dst = (src1 * src2) >> 64.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
11280 
11281 // Combined Integer Multiply & Add/Sub
11282 
// 32-bit multiply-add: dst = src3 + src1 * src2.
// Fix: the format string previously showed the 64-bit mnemonic "madd"
// while the encoding emits the 32-bit maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11298 
// 32-bit multiply-subtract: dst = src3 - src1 * src2.
// Fix: the format string previously showed the 64-bit mnemonic "msub"
// while the encoding emits the 32-bit msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
11314 
11315 // Combined Long Multiply & Add/Sub
11316 
// 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11332 
// 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
11348 
11349 // Integer Divide
11350 
// 32-bit signed divide: dst = src1 / src2 (via sdivw; min-int/-1 and
// divide-by-zero handling live in the shared encode block).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11360 
// (src1 >> 31) >>> 31 collapses to a single logical shift right by 31,
// extracting the sign bit as 0 or 1.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
11370 
// Divide-by-2 rounding idiom: dst = src + (src >>> 31), i.e. add the
// sign bit, folded into one addw with an LSR-shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
11384 
11385 // Long Divide
11386 
// 64-bit signed divide: dst = src1 / src2 (via sdiv; see the shared
// encode block for the corner-case handling).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11396 
// Long variant of signExtract: (src1 >> 63) >>> 63 becomes one lsr by
// 63, extracting the sign bit as 0 or 1.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
11406 
// Long variant of div2Round: dst = src + (src >>> 63), folded into one
// add with an LSR-shifted operand.
// Fix: the format string omitted the "LSR" shown by the int sibling
// div2Round, so the disassembly read as a plain register add.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
11420 
11421 // Integer Remainder
11422 
// 32-bit remainder: dst = src1 % src2, computed as
// src1 - (src1 / src2) * src2 via sdivw + msubw (two-insn sequence).
// Fix: the format string previously contained a stray "(" after msubw
// with no closing paren, corrupting the disassembly text.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
11433 
11434 // Long Remainder
11435 
// 64-bit remainder: dst = src1 % src2, computed as
// src1 - (src1 / src2) * src2 via sdiv + msub (two-insn sequence).
// Fix: the format string previously contained a stray "(" after msub
// with no closing paren, and lacked the "\t" after "\n" that the modI
// rule uses for aligned multi-line disassembly.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
11446 
11447 // Integer Shifts
11448 
11449 // Shift Left Register
// Int left-shift by a register amount (LSLVW); the hardware uses the
// count modulo 32, matching Java's int << semantics.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11464 
11465 // Shift Left Immediate
// Int left-shift by a constant; the immediate is masked to 5 bits
// (0x1f) at emission time, matching Java's int << semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11480 
11481 // Shift Right Logical Register
// Int unsigned right-shift by a register amount (LSRVW); the hardware
// uses the count modulo 32, matching Java's int >>> semantics.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11496 
11497 // Shift Right Logical Immediate
// Int unsigned right-shift by a constant; the immediate is masked to
// 5 bits (0x1f) at emission time, matching Java's int >>> semantics.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11512 
11513 // Shift Right Arithmetic Register
// Int arithmetic right-shift by a register amount (ASRVW); the hardware
// uses the count modulo 32, matching Java's int >> semantics.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11528 
11529 // Shift Right Arithmetic Immediate
// Int arithmetic right-shift by a constant; the immediate is masked to
// 5 bits (0x1f) at emission time, matching Java's int >> semantics.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11544 
11545 // Combined Int Mask and Right Shift (using UBFM)
11546 // TODO
11547 
11548 // Long Shifts
11549 
11550 // Shift Left Register
// Long left-shift by a register amount (LSLV); the hardware uses the
// count modulo 64, matching Java's long << semantics.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11565 
11566 // Shift Left Immediate
// Long left-shift by a constant; the immediate is masked to 6 bits
// (0x3f) at emission time, matching Java's long << semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11581 
11582 // Shift Right Logical Register
// Long unsigned right-shift by a register amount (LSRV); the hardware
// uses the count modulo 64, matching Java's long >>> semantics.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11597 
11598 // Shift Right Logical Immediate
// Long unsigned right-shift by a constant; the immediate is masked to
// 6 bits (0x3f) at emission time, matching Java's long >>> semantics.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11613 
// A special-case pattern for card table stores: a pointer is cast to a
// machine word (CastP2X) and logically shifted right by a constant,
// all folded into a single LSR.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11629 
11630 // Shift Right Arithmetic Register
// Long arithmetic right-shift by a register amount (ASRV); the hardware
// uses the count modulo 64, matching Java's long >> semantics.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
11645 
11646 // Shift Right Arithmetic Immediate
// Long arithmetic right-shift by a constant; the immediate is masked to
// 6 bits (0x3f) at emission time, matching Java's long >> semantics.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
11661 
11662 // BEGIN This section of the file is automatically generated. Do not edit --------------
11663 
// NOTE(review): these instructs are inside the machine-generated region
// (see the BEGIN marker above).  Comments only have been added here;
// the patterns themselves should be changed by regenerating, not by
// hand-editing.

// dst = ~src1: XorL with -1 is matched as a NOT and emitted as
// EON dst, src1, zr (exclusive-or-not against the zero register).
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// dst = ~src1 (32-bit): EONW dst, src1, zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

// dst = src1 & ~src2 (32-bit): the XOR-with--1 "not" is fused into BICW.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2 (64-bit): fused into BIC.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (32-bit): fused into ORNW.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2 (64-bit): fused into ORN.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (32-bit): -1 ^ (src2 ^ src1) matched as EONW.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2) (64-bit): -1 ^ (src2 ^ src1) matched as EON.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11798 
// NOTE(review): machine-generated region (see the BEGIN marker above);
// comments only have been added here.
//
// Each pattern below fuses a logical op with the NOT of a shifted
// operand:   dst = src1 OP ~(src2 <shift> src3)
// (for the Xor variants the -1 appears as the outer operand instead),
// and is emitted as one BIC/BICW, ORN/ORNW or EON/EONW using the
// shifted-register form.  Shift immediates are masked to 5 bits for
// the 32-bit (W) forms and 6 bits for the 64-bit forms.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12122 
// NOTE(review): machine-generated region (see the BEGIN marker above);
// comments only have been added here.
//
// Each pattern below fuses a logical op with a constant-shifted second
// operand:   dst = src1 OP (src2 <shift> src3)
// emitted as a single AND/EOR/ORR (or ANDW/EORW/ORRW) using the
// shifted-register form.  Shift immediates are masked to 5 bits for the
// 32-bit (W) forms and 6 bits for the 64-bit forms.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12445 
12446 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
12447                          iRegL src1, iRegL src2,
12448                          immI src3, rFlagsReg cr) %{
12449   match(Set dst (OrL src1 (LShiftL src2 src3)));
12450 
12451   ins_cost(1.9 * INSN_COST);
12452   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
12453 
12454   ins_encode %{
12455     __ orr(as_Register($dst$$reg),
12456               as_Register($src1$$reg),
12457               as_Register($src2$$reg),
12458               Assembler::LSL,
12459               $src3$$constant & 0x3f);
12460   %}
12461 
12462   ins_pipe(ialu_reg_reg_shift);
12463 %}
12464 
// Integer/long add with a shifted second operand.  Each pattern folds a
// constant-shift node into ADD's shifted-register form.  Shift amounts are
// masked to the encodable range (0x1f for 32-bit, 0x3f for 64-bit forms).

// dst = src1 + (src2 >>> src3), 32-bit.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >>> src3), 64-bit.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (arithmetic shift), 32-bit.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 >> src3) (arithmetic shift), 64-bit.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3), 32-bit.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 + (src2 << src3), 64-bit.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12578 
// Integer/long subtract with a shifted second operand.  Each pattern folds
// a constant-shift node into SUB's shifted-register form.  Shift amounts
// are masked to the encodable range (0x1f 32-bit, 0x3f 64-bit).

// dst = src1 - (src2 >>> src3), 32-bit.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >>> src3), 64-bit.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3) (arithmetic shift), 32-bit.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 >> src3) (arithmetic shift), 64-bit.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3), 32-bit.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 - (src2 << src3), 64-bit.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12692 
12693 
12694 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (x << lshift) >> rshift (arithmetic) is a signed bitfield move:
// SBFM with immr = (rshift - lshift) & 63, imms = 63 - lshift.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // s selects the top source bit of the field; r rotates it into place.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12717 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: (x << lshift) >> rshift (arithmetic) maps to
// SBFMW with immr = (rshift - lshift) & 31, imms = 31 - lshift.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // s selects the top source bit of the field; r rotates it into place.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12740 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (x << lshift) >>> rshift maps to
// UBFM with immr = (rshift - lshift) & 63, imms = 63 - lshift.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // s selects the top source bit of the field; r rotates it into place.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12763 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: (x << lshift) >>> rshift maps to
// UBFMW with immr = (rshift - lshift) & 31, imms = 31 - lshift.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // s selects the top source bit of the field; r rotates it into place.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, where mask = 2^width - 1 (guaranteed by
// immI_bitmask), is a single UBFX (word) instruction.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    // Mask the shift count to the 5-bit range ubfxw can encode, as the
    // other 32-bit shift patterns in this file do.
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// (src >>> rshift) & mask, where mask = 2^width - 1 (guaranteed by
// immL_bitmask), is a single UBFX instruction.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    // Mask the shift count to the 6-bit range ubfx can encode, as the
    // other 64-bit shift patterns in this file do.
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    // The mask is a 64-bit value, so the width must be computed with the
    // long variant (matches the exact_log2_long use in ubfizL's predicate).
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12818 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// (long)((src >>> rshift) & mask): since the masked value is non-negative,
// the zero-extending UBFX also performs the ConvI2L for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    // Mask the shift count to the range URShiftI can meaningfully use,
    // consistent with the other 32-bit shift patterns in this file.
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12836 
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
// (src & mask) << lshift, with mask = 2^width - 1, is a single UBFIZ (word)
// insert-in-zero.  The predicate checks lshift + width fits in 32 bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
// (src & mask) << lshift, with mask = 2^width - 1, is a single UBFIZ
// insert-in-zero.  The predicate checks lshift + width fits in 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    // The mask is a 64-bit value: compute the field width with the long
    // variant, matching the exact_log2_long used in the predicate above.
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12875 
// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
// ((long)(src & mask)) << lshift: the masked value is non-negative, so the
// zero-extending UBFIZ subsumes the ConvI2L.  The predicate checks that
// lshift + width fits within 32 bits of source field.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12894 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 64 is an
// EXTR (extract from a register pair); with src1 == src2 this is a rotate
// right by rshift.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12911 
// 32-bit variant of extrOrL: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 32 maps to EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12926 
// Same as extrOrL but matching AddL: when the shifted fields cannot
// overlap (lshift + rshift == 64) ADD and OR compute the same value.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12941 
// 32-bit variant of extrAddL: non-overlapping shifted fields combined
// with AddI map to EXTRW.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12956 
12957 
// rol expander
// Rotate-left by a variable amount: AArch64 has no ROLV, so negate the
// shift (rotl(x, s) == rotr(x, -s mod 64)) and use RORV.  rscratch1 holds
// the negated count; cr appears unused here (subw does not set flags) --
// presumably kept for interface symmetry with the expand sites.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12973 
// rol expander
// 32-bit variant of rolL_rReg: rotl(x, s) == rotr(x, -s mod 32), so negate
// the shift count into rscratch1 and use RORVW.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12989 
// Rotate-left idioms: (x << s) | (x >>> (c - s)) where c is the type
// width (64/32) or 0.  Both forms are equivalent to rol because the shift
// count is taken modulo the width; each expands to the rol expander above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
13025 
// ror expander
// Rotate-right by a variable amount maps directly to RORV; no negation
// needed, hence the lower cost than the rol expander.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13040 
// ror expander
// 32-bit variant of rorL_rReg: variable rotate-right via RORVW.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
13055 
// Rotate-right idioms: (x >>> s) | (x << (c - s)) where c is the type
// width (64/32) or 0; both are equivalent to ror because shift counts are
// taken modulo the width.  Each expands to the ror expander above.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
13091 
// Add/subtract (extended)

// dst = src1 + (long)src2: folds the i2l conversion into ADD's
// extended-register form (sxtw).
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// dst = src1 - (long)src2: folds the i2l conversion into SUB's
// extended-register form (sxtw).
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
13119 
13120 
// Add with extension expressed as a shift pair.  The idiom
// (src2 << k) >> k (arithmetic) is a sign-extend of the low (width - k)
// bits, and (src2 << k) >>> k is a zero-extend; both fold into ADD's
// extended-register form (sxtb/sxth/sxtw/uxtb), saving two shifts.

// dst = src1 + (short)src2  (k = 16, 32-bit).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (byte)src2  (k = 24, 32-bit).
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xff) via shift pair  (k = 24, 32-bit, unsigned).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (short)src2  (k = 48, 64-bit).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (int)src2  (k = 32, 64-bit).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (byte)src2  (k = 56, 64-bit).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xff) via shift pair  (k = 56, 64-bit, unsigned).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
13211 
13212 
// Add/subtract with zero-extension expressed as a mask.  src2 & 0xff,
// & 0xffff, or & 0xffffffff is a zero-extend of the low byte/half/word,
// which folds into the extended-register form (uxtb/uxth/uxtw), removing
// the AND.

// dst = src1 + (src2 & 0xff), 32-bit.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffff), 32-bit.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xff), 64-bit.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffff), 64-bit.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 + (src2 & 0xffffffff), 64-bit.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xff), 32-bit.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffff), 32-bit.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xff), 64-bit.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// dst = src1 - (src2 & 0xffff), 64-bit.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
13329 
13330 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
13331 %{
13332   match(Set dst (SubL src1 (AndL src2 mask)));
13333   ins_cost(INSN_COST);
13334   format %{ "sub  $dst, $src1, $src2, uxtw" %}
13335 
13336    ins_encode %{
13337      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
13338             as_Register($src2$$reg), ext::uxtw);
13339    %}
13340   ins_pipe(ialu_reg_reg);
13341 %}
13342 
13343 
// Auto-generated rules (see END marker below): each matches a
// sign-extension expressed as a left/arithmetic-right shift pair,
// (src2 << k) >> k, itself re-shifted left by lshift2, and folds the
// whole subtree into one extended-register add/sub with a shift amount,
// e.g. "add dst, src1, src2, sxtb #n".
// NOTE(review): the format strings print the literal text "#lshift2"
// rather than the operand value ($lshift2); this quirk is shared by the
// whole generated section, so it is left as-is.
// NOTE(review): 'rFlagsReg cr' is declared without an effect() clause,
// consistent with the sibling generated rules.

// long: sign-extend byte of src2 (<<56 >>56), shifted, added
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: sign-extend halfword of src2 (<<48 >>48), shifted, added
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: sign-extend word of src2 (<<32 >>32), shifted, added
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: sign-extend byte of src2, shifted, subtracted
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: sign-extend halfword of src2, shifted, subtracted
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long: sign-extend word of src2, shifted, subtracted
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int: sign-extend byte of src2 (<<24 >>24), shifted, added (addw)
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int: sign-extend halfword of src2 (<<16 >>16), shifted, added (addw)
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int: sign-extend byte of src2, shifted, subtracted (subw)
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int: sign-extend halfword of src2, shifted, subtracted (subw)
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13473 
13474 
// Auto-generated rules: fold (ConvI2L src2) << lshift into the
// sign-extending-word (sxtw) extended-register form of add/sub.
// NOTE(review): these two rules end with "%};" (trailing semicolon after
// the closing delimiter) unlike their siblings; accepted by ADLC and
// preserved here since the section is machine-generated.

// long += (long)(int)src2 << lshift, as "add ..., sxtw #lshift"
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};

// long -= (long)(int)src2 << lshift, as "sub ..., sxtw #lshift"
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%};
13500 
13501 
// Auto-generated rules: fold a zero-extension mask (AndI/AndL) that is
// itself left-shifted into one extended-register add/sub with a shift,
// e.g. "add dst, src1, src2, uxtb #lshift" == src1 + ((src2 & 0xFF) << lshift).
// NOTE(review): the format strings print the literal "#lshift" text, and
// 'rFlagsReg cr' has no effect() clause — both consistent with the rest
// of this generated section.

// long add of (src2 & 0xFF) << lshift
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long add of (src2 & 0xFFFF) << lshift
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long add of (src2 & 0xFFFFFFFF) << lshift
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long subtract of (src2 & 0xFF) << lshift
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long subtract of (src2 & 0xFFFF) << lshift
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// long subtract of (src2 & 0xFFFFFFFF) << lshift
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int add of (src2 & 0xFF) << lshift (addw)
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int add of (src2 & 0xFFFF) << lshift (addw)
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int subtract of (src2 & 0xFF) << lshift (subw)
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}

// int subtract of (src2 & 0xFFFF) << lshift (subw)
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13631 // END This section of the file is automatically generated. Do not edit --------------
13632 
13633 // ============================================================================
13634 // Floating Point Arithmetic Instructions
13635 
// Scalar FP add/sub/mul, single (s-suffix mnemonics) and double
// (d-suffix) precision, register-register forms only.

// float + float -> fadds
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double + double -> faddd
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float - float -> fsubs
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double - double -> fsubd
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// float * float -> fmuls (cost 6 vs 5 for add/sub)
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// double * double -> fmuld
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13725 
// Fused multiply-add family. All rules are guarded by UseFMA since a
// fused result differs from mul-then-add (single rounding). Negations
// on the FmaF/FmaD operands select the fmadd/fmsub/fnmadd/fnmsub
// variants; rules with two match() lines accept either operand of the
// multiply being negated (equivalent by sign algebra).

// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF src3 (Binary (NegF src1) src2)));
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 + src3
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD src3 (Binary (NegD src1) src2)));
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2)));
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// -src1 * src2 - src3
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2)));
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): the 'zero' operand is not referenced by match() or the
// encoding; presumably a leftover from an earlier pattern — confirm
// before removing, since this section's rules are tightly paired.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// NOTE(review): unused 'zero' operand, same as mnsubF_reg_reg above.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
  predicate(UseFMA);
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
  // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13866 
13867 
// Scalar FP division; double costs more than float (32 vs 18 units),
// reflecting the longer-latency divider.

// float / float -> fdivs
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// double / double -> fdivd
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13897 
// Single-precision negate: dst = -src via fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Fixed: format previously printed "fneg" but the encoding emits
  // fnegs; now consistent with negD's "fnegd" and the other s-suffix
  // single-precision formats in this file.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13911 
// Double-precision negate: dst = -src via fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13925 
// Scalar FP absolute value (fabss/fabsd).

// float abs -> fabss
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// double abs -> fabsd
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13951 
// Double-precision square root: dst = sqrt(src) via fsqrtd.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_s (the single-precision divide/sqrt pipe) —
  // evidently swapped with sqrtF_reg below. Use the double-precision
  // pipe so the scheduler models fsqrtd's latency correctly; this only
  // affects scheduling cost, not generated-code semantics.
  ins_pipe(fp_div_d);
%}
13964 
// Single-precision square root. Matches the (ConvD2F (SqrtD (ConvF2D x)))
// idiom the graph uses for Math.sqrt on a float, collapsing it to fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Fixed: was fp_div_d — swapped with sqrtD_reg above. fsqrts is a
  // single-precision op, so use the single-precision divide/sqrt pipe.
  // Scheduling-cost change only; no semantic effect.
  ins_pipe(fp_div_s);
%}
13977 
13978 // ============================================================================
13979 // Logical Instructions
13980 
13981 // Integer Logical Instructions
13982 
13983 // And Instructions
13984 
13985 
// int bitwise AND, register-register: andw (non-flag-setting).
// NOTE(review): 'rFlagsReg cr' is declared with no effect() clause,
// matching the long-form andL_reg_reg below.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14000 
// int bitwise AND with a logical immediate (immILog: encodable bitmask).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Fixed: format previously printed "andsw" (the flag-setting form),
  // but the encoding emits plain andw, which does not set flags. Debug
  // output now matches the emitted instruction and the reg-reg rule.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14015 
14016 // Or Instructions
14017 
// int OR/XOR, register-register and register-immediate forms. The
// immediate variants require an immILog operand (a value encodable as
// an AArch64 logical-immediate bitmask).

// int | int -> orrw
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int | imm -> orrw
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// int ^ int -> eorw
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int ^ imm -> eorw
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14079 
14080 // Long Logical Instructions
14081 // TODO
14082 
// long bitwise AND, register-register (64-bit andr).
// NOTE(review): 'rFlagsReg cr' is declared with no effect() clause,
// matching andI_reg_reg above.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: annotation said "# int" although this rule matches AndL.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14097 
// long bitwise AND with a logical immediate (immLLog bitmask operand).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Fixed: annotation said "# int" although this rule matches AndL.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14112 
14113 // Or Instructions
14114 
// long bitwise OR, register-register (64-bit orr).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed: annotation said "# int" although this rule matches OrL.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14129 
// long bitwise OR with a logical immediate (immLLog bitmask operand).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Fixed: annotation said "# int" although this rule matches OrL.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14144 
14145 // Xor Instructions
14146 
// long bitwise XOR, register-register (64-bit eor).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Fixed: annotation said "# int" although this rule matches XorL.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14161 
// long bitwise XOR with a logical immediate (immLLog bitmask operand).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: annotation said "# int" although this rule matches XorL.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14176 
// int <-> long conversions.

// i2l: sign-extend via sbfm(dst, src, 0, 31) == sxtw
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// unsigned i2l: (ConvI2L src) & 0xFFFFFFFF folds to a single
// zero-extending ubfm(dst, src, 0, 31).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// l2i: a 32-bit register move (movw) keeps the low word
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14215 
// Convert int to boolean (0 or 1): compare against zero, then set the
// destination on NE. Clobbers the flags register.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14233 
// Convert pointer to boolean (0 or 1): 64-bit compare against zero, then
// set the destination on NE. Clobbers the flags register.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
14251 
// Double to float narrowing conversion.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Float to double widening conversion.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float to signed int conversion (fcvtzs, 32-bit form).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float to signed long conversion.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// Signed int to float conversion.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Signed long to float conversion.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double to signed int conversion.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double to signed long conversion.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Signed int to double conversion.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Signed long to double conversion.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14381 
// stack <-> reg and reg <-> reg shuffles with no conversion

// Load a float stack slot into an int register, bits unchanged.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load an int stack slot into a float register, bits unchanged.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load a double stack slot into a long register, bits unchanged.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a long stack slot into a double register, bits unchanged.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14455 
// Store a float register into an int stack slot, bits unchanged.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14473 
// Store an int register into a float stack slot, bits unchanged.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14491 
// Store a double register into a long stack slot, bits unchanged.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: format previously listed the operands as "$dst, $src", the
  // reverse of the emitted store (and of every sibling *_reg_stack rule).
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14509 
// Store a long register into a double stack slot, bits unchanged.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14527 
// Bit-preserving move, float register to int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bit-preserving move, int register to float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Bit-preserving move, double register to long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bit-preserving move, long register to double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14599 
14600 // ============================================================================
14601 // clearing of an array
14602 
14603 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14604 %{
14605   match(Set dummy (ClearArray cnt base));
14606   effect(USE_KILL cnt, USE_KILL base);
14607 
14608   ins_cost(4 * INSN_COST);
14609   format %{ "ClearArray $cnt, $base" %}
14610 
14611   ins_encode %{
14612     __ zero_words($base$$Register, $cnt$$Register);
14613   %}
14614 
14615   ins_pipe(pipe_class_memory);
14616 %}
14617 
14618 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
14619 %{
14620   predicate((u_int64_t)n->in(2)->get_long()
14621             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
14622   match(Set dummy (ClearArray cnt base));
14623   effect(USE_KILL base);
14624 
14625   ins_cost(4 * INSN_COST);
14626   format %{ "ClearArray $cnt, $base" %}
14627 
14628   ins_encode %{
14629     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
14630   %}
14631 
14632   ins_pipe(pipe_class_memory);
14633 %}
14634 
14635 // ============================================================================
14636 // Overflow Math Instructions
14637 
14638 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14639 %{
14640   match(Set cr (OverflowAddI op1 op2));
14641 
14642   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14643   ins_cost(INSN_COST);
14644   ins_encode %{
14645     __ cmnw($op1$$Register, $op2$$Register);
14646   %}
14647 
14648   ins_pipe(icmp_reg_reg);
14649 %}
14650 
14651 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
14652 %{
14653   match(Set cr (OverflowAddI op1 op2));
14654 
14655   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
14656   ins_cost(INSN_COST);
14657   ins_encode %{
14658     __ cmnw($op1$$Register, $op2$$constant);
14659   %}
14660 
14661   ins_pipe(icmp_reg_imm);
14662 %}
14663 
14664 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14665 %{
14666   match(Set cr (OverflowAddL op1 op2));
14667 
14668   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14669   ins_cost(INSN_COST);
14670   ins_encode %{
14671     __ cmn($op1$$Register, $op2$$Register);
14672   %}
14673 
14674   ins_pipe(icmp_reg_reg);
14675 %}
14676 
14677 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14678 %{
14679   match(Set cr (OverflowAddL op1 op2));
14680 
14681   format %{ "cmn   $op1, $op2\t# overflow check long" %}
14682   ins_cost(INSN_COST);
14683   ins_encode %{
14684     __ cmn($op1$$Register, $op2$$constant);
14685   %}
14686 
14687   ins_pipe(icmp_reg_imm);
14688 %}
14689 
14690 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14691 %{
14692   match(Set cr (OverflowSubI op1 op2));
14693 
14694   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
14695   ins_cost(INSN_COST);
14696   ins_encode %{
14697     __ cmpw($op1$$Register, $op2$$Register);
14698   %}
14699 
14700   ins_pipe(icmp_reg_reg);
14701 %}
14702 
14703 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
14704 %{
14705   match(Set cr (OverflowSubI op1 op2));
14706 
14707   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
14708   ins_cost(INSN_COST);
14709   ins_encode %{
14710     __ cmpw($op1$$Register, $op2$$constant);
14711   %}
14712 
14713   ins_pipe(icmp_reg_imm);
14714 %}
14715 
14716 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14717 %{
14718   match(Set cr (OverflowSubL op1 op2));
14719 
14720   format %{ "cmp   $op1, $op2\t# overflow check long" %}
14721   ins_cost(INSN_COST);
14722   ins_encode %{
14723     __ cmp($op1$$Register, $op2$$Register);
14724   %}
14725 
14726   ins_pipe(icmp_reg_reg);
14727 %}
14728 
14729 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
14730 %{
14731   match(Set cr (OverflowSubL op1 op2));
14732 
14733   format %{ "cmp   $op1, $op2\t# overflow check long" %}
14734   ins_cost(INSN_COST);
14735   ins_encode %{
14736     __ cmp($op1$$Register, $op2$$constant);
14737   %}
14738 
14739   ins_pipe(icmp_reg_imm);
14740 %}
14741 
14742 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
14743 %{
14744   match(Set cr (OverflowSubI zero op1));
14745 
14746   format %{ "cmpw  zr, $op1\t# overflow check int" %}
14747   ins_cost(INSN_COST);
14748   ins_encode %{
14749     __ cmpw(zr, $op1$$Register);
14750   %}
14751 
14752   ins_pipe(icmp_reg_imm);
14753 %}
14754 
14755 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
14756 %{
14757   match(Set cr (OverflowSubL zero op1));
14758 
14759   format %{ "cmp   zr, $op1\t# overflow check long" %}
14760   ins_cost(INSN_COST);
14761   ins_encode %{
14762     __ cmp(zr, $op1$$Register);
14763   %}
14764 
14765   ins_pipe(icmp_reg_imm);
14766 %}
14767 
// Int multiply overflow check: widen to 64 bits with smull, then compare
// the 64-bit product with its own sign extension; they differ exactly when
// the product does not fit in 32 bits. The cselw/cmpw tail converts that
// NE/EQ result into the V flag expected by cmpOp consumers.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: the overflow test feeds a branch directly, so the V-flag
// materialization above is unnecessary — branch on NE/EQ instead.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check: mul gives the low 64 bits, smulh the high
// 64; no overflow iff the high half is the sign extension of the low half.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused branch form of the long multiply overflow check.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
14857 
14858 // ============================================================================
14859 // Compare Instructions
14860 
14861 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
14862 %{
14863   match(Set cr (CmpI op1 op2));
14864 
14865   effect(DEF cr, USE op1, USE op2);
14866 
14867   ins_cost(INSN_COST);
14868   format %{ "cmpw  $op1, $op2" %}
14869 
14870   ins_encode(aarch64_enc_cmpw(op1, op2));
14871 
14872   ins_pipe(icmp_reg_reg);
14873 %}
14874 
14875 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
14876 %{
14877   match(Set cr (CmpI op1 zero));
14878 
14879   effect(DEF cr, USE op1);
14880 
14881   ins_cost(INSN_COST);
14882   format %{ "cmpw $op1, 0" %}
14883 
14884   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14885 
14886   ins_pipe(icmp_reg_imm);
14887 %}
14888 
14889 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
14890 %{
14891   match(Set cr (CmpI op1 op2));
14892 
14893   effect(DEF cr, USE op1);
14894 
14895   ins_cost(INSN_COST);
14896   format %{ "cmpw  $op1, $op2" %}
14897 
14898   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14899 
14900   ins_pipe(icmp_reg_imm);
14901 %}
14902 
14903 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
14904 %{
14905   match(Set cr (CmpI op1 op2));
14906 
14907   effect(DEF cr, USE op1);
14908 
14909   ins_cost(INSN_COST * 2);
14910   format %{ "cmpw  $op1, $op2" %}
14911 
14912   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14913 
14914   ins_pipe(icmp_reg_imm);
14915 %}
14916 
14917 // Unsigned compare Instructions; really, same as signed compare
14918 // except it should only be used to feed an If or a CMovI which takes a
14919 // cmpOpU.
14920 
14921 instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
14922 %{
14923   match(Set cr (CmpU op1 op2));
14924 
14925   effect(DEF cr, USE op1, USE op2);
14926 
14927   ins_cost(INSN_COST);
14928   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14929 
14930   ins_encode(aarch64_enc_cmpw(op1, op2));
14931 
14932   ins_pipe(icmp_reg_reg);
14933 %}
14934 
14935 instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
14936 %{
14937   match(Set cr (CmpU op1 zero));
14938 
14939   effect(DEF cr, USE op1);
14940 
14941   ins_cost(INSN_COST);
14942   format %{ "cmpw $op1, #0\t# unsigned" %}
14943 
14944   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
14945 
14946   ins_pipe(icmp_reg_imm);
14947 %}
14948 
14949 instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
14950 %{
14951   match(Set cr (CmpU op1 op2));
14952 
14953   effect(DEF cr, USE op1);
14954 
14955   ins_cost(INSN_COST);
14956   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14957 
14958   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
14959 
14960   ins_pipe(icmp_reg_imm);
14961 %}
14962 
14963 instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
14964 %{
14965   match(Set cr (CmpU op1 op2));
14966 
14967   effect(DEF cr, USE op1);
14968 
14969   ins_cost(INSN_COST * 2);
14970   format %{ "cmpw  $op1, $op2\t# unsigned" %}
14971 
14972   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
14973 
14974   ins_pipe(icmp_reg_imm);
14975 %}
14976 
14977 instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14978 %{
14979   match(Set cr (CmpL op1 op2));
14980 
14981   effect(DEF cr, USE op1, USE op2);
14982 
14983   ins_cost(INSN_COST);
14984   format %{ "cmp  $op1, $op2" %}
14985 
14986   ins_encode(aarch64_enc_cmp(op1, op2));
14987 
14988   ins_pipe(icmp_reg_reg);
14989 %}
14990 
14991 instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
14992 %{
14993   match(Set cr (CmpL op1 zero));
14994 
14995   effect(DEF cr, USE op1);
14996 
14997   ins_cost(INSN_COST);
14998   format %{ "tst  $op1" %}
14999 
15000   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
15001 
15002   ins_pipe(icmp_reg_imm);
15003 %}
15004 
15005 instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
15006 %{
15007   match(Set cr (CmpL op1 op2));
15008 
15009   effect(DEF cr, USE op1);
15010 
15011   ins_cost(INSN_COST);
15012   format %{ "cmp  $op1, $op2" %}
15013 
15014   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
15015 
15016   ins_pipe(icmp_reg_imm);
15017 %}
15018 
15019 instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
15020 %{
15021   match(Set cr (CmpL op1 op2));
15022 
15023   effect(DEF cr, USE op1);
15024 
15025   ins_cost(INSN_COST * 2);
15026   format %{ "cmp  $op1, $op2" %}
15027 
15028   ins_encode(aarch64_enc_cmp_imm(op1, op2));
15029 
15030   ins_pipe(icmp_reg_imm);
15031 %}
15032 
15033 instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
15034 %{
15035   match(Set cr (CmpUL op1 op2));
15036 
15037   effect(DEF cr, USE op1, USE op2);
15038 
15039   ins_cost(INSN_COST);
15040   format %{ "cmp  $op1, $op2" %}
15041 
15042   ins_encode(aarch64_enc_cmp(op1, op2));
15043 
15044   ins_pipe(icmp_reg_reg);
15045 %}
15046 
15047 instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
15048 %{
15049   match(Set cr (CmpUL op1 zero));
15050 
15051   effect(DEF cr, USE op1);
15052 
15053   ins_cost(INSN_COST);
15054   format %{ "tst  $op1" %}
15055 
15056   ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
15057 
15058   ins_pipe(icmp_reg_imm);
15059 %}
15060 
15061 instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
15062 %{
15063   match(Set cr (CmpUL op1 op2));
15064 
15065   effect(DEF cr, USE op1);
15066 
15067   ins_cost(INSN_COST);
15068   format %{ "cmp  $op1, $op2" %}
15069 
15070   ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
15071 
15072   ins_pipe(icmp_reg_imm);
15073 %}
15074 
15075 instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
15076 %{
15077   match(Set cr (CmpUL op1 op2));
15078 
15079   effect(DEF cr, USE op1);
15080 
15081   ins_cost(INSN_COST * 2);
15082   format %{ "cmp  $op1, $op2" %}
15083 
15084   ins_encode(aarch64_enc_cmp_imm(op1, op2));
15085 
15086   ins_pipe(icmp_reg_imm);
15087 %}
15088 
15089 instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
15090 %{
15091   match(Set cr (CmpP op1 op2));
15092 
15093   effect(DEF cr, USE op1, USE op2);
15094 
15095   ins_cost(INSN_COST);
15096   format %{ "cmp  $op1, $op2\t // ptr" %}
15097 
15098   ins_encode(aarch64_enc_cmpp(op1, op2));
15099 
15100   ins_pipe(icmp_reg_reg);
15101 %}
15102 
15103 instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
15104 %{
15105   match(Set cr (CmpN op1 op2));
15106 
15107   effect(DEF cr, USE op1, USE op2);
15108 
15109   ins_cost(INSN_COST);
15110   format %{ "cmp  $op1, $op2\t // compressed ptr" %}
15111 
15112   ins_encode(aarch64_enc_cmpn(op1, op2));
15113 
15114   ins_pipe(icmp_reg_reg);
15115 %}
15116 
15117 instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
15118 %{
15119   match(Set cr (CmpP op1 zero));
15120 
15121   effect(DEF cr, USE op1, USE zero);
15122 
15123   ins_cost(INSN_COST);
15124   format %{ "cmp  $op1, 0\t // ptr" %}
15125 
15126   ins_encode(aarch64_enc_testp(op1));
15127 
15128   ins_pipe(icmp_reg_imm);
15129 %}
15130 
15131 instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
15132 %{
15133   match(Set cr (CmpN op1 zero));
15134 
15135   effect(DEF cr, USE op1, USE zero);
15136 
15137   ins_cost(INSN_COST);
15138   format %{ "cmp  $op1, 0\t // compressed ptr" %}
15139 
15140   ins_encode(aarch64_enc_testn(op1));
15141 
15142   ins_pipe(icmp_reg_imm);
15143 %}
15144 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// Float compare, register-register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
15163 
// Float compare against the 0.0 immediate form of fcmps.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Fixed: dropped the non-standard "D" literal suffix (0.0D); plain
    // 0.0 is already of type double in standard C++.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15177 // FROM HERE
15178 
// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
15192 
// Double compare against the 0.0 immediate form of fcmpd.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Fixed: dropped the non-standard "D" literal suffix (0.0D); plain
    // 0.0 is already of type double in standard C++.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15206 
// Three-way float compare producing -1/0/1 in an int register
// (CmpF3): -1 if less or unordered, 0 if equal, 1 if greater.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format text previously had an unbalanced parenthesis
  // ("csinvw($dst, zr, zr, eq"); rewritten in plain assembler style.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Fixed: removed an unused "Label done" that was declared and bound
    // but never branched to.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15234 
// Three-way double compare producing -1/0/1 in an int register
// (CmpD3): -1 if less or unordered, 0 if equal, 1 if greater.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format text previously had an unbalanced parenthesis;
  // rewritten in plain assembler style.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Fixed: removed an unused "Label done" that was declared and bound
    // but never branched to.
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15261 
// Three-way float compare against 0.0 producing -1/0/1 in an int register.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format text previously had an unbalanced parenthesis;
  // rewritten in plain assembler style.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Fixed: removed an unused "Label done" and the non-standard 0.0D
    // literal suffix (plain 0.0 is already double in standard C++).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
15288 
// Three-way double compare against 0.0 producing -1/0/1 in an int register.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: format text previously had an unbalanced parenthesis;
  // rewritten in plain assembler style.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    // Fixed: removed an unused "Label done" and the non-standard 0.0D
    // literal suffix (plain 0.0 is already double in standard C++).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
15314 
// CmpLTMask: dst = (p < q) ? -1 : 0. cset produces 0/1, then
// subtracting from zero turns 1 into the all-ones mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Special case against zero: an arithmetic right shift by 31 replicates
// the sign bit, yielding -1 for negative src and 0 otherwise.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15351 
15352 // ============================================================================
15353 // Max and Min
15354 
// Signed int minimum: compare then conditional-select the smaller operand.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    // dst = (src1 < src2) ? src1 : src2 (signed, via LT).
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum: identical shape to minI_rReg but selects on GT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    // dst = (src1 > src2) ? src1 : src2 (signed, via GT).
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
15405 
15406 // ============================================================================
15407 // Branch Instructions
15408 
15409 // Direct Branch.
// Unconditional PC-relative branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
// Same as branchCon but takes the unsigned condition operand/flags register.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15465 
15466 // Make use of CBZ and CBNZ.  These instructions, as well as being
15467 // shorter than (cmp; branch), have the additional benefit of not
15468 // killing the flags.
15469 
// Compare-int-with-zero and branch: EQ -> CBZW, NE -> CBNZW (32-bit form).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long-with-zero variant: 64-bit CBZ/CBNZ.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer-with-null variant: 64-bit CBZ/CBNZ.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Narrow (compressed) oop with zero: narrow oops are 32 bits, so use CBZW/CBNZW.
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check of a DecodeN without decoding: a narrow oop decodes to null
// iff the 32-bit encoded value is zero, so test the narrow form directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15554 
// Unsigned compare with zero and branch.  For an unsigned value u, both
// EQ and LS (u <= 0) reduce to u == 0 -> CBZW; the other accepted
// conditions reduce to u != 0 -> CBNZW.
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// 64-bit (unsigned long) variant of the above: CBZ/CBNZ.
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15588 
15589 // Test bit and Branch
15590 
15591 // Patterns for short (< 32KiB) variants
// Sign test of a long via its sign bit (bit 63): lt 0 <=> bit set (TBNZ),
// ge 0 <=> bit clear (TBZ).  Short variant, so range is limited.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Same for int: test sign bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// (x & (1 << k)) ==/!= 0 on a long: predicate requires the mask to be a
// power of two, so the single bit index is exact_log2 of the constant.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int version of the single-bit test-and-branch above.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15657 
15658 // And far variants
// Far variants of the four test-bit-and-branch patterns above: identical
// logic, but tbr is told to emit a long-range sequence (/*far*/true) and
// there is no ins_short_branch(1), so these are valid at any distance.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15720 
15721 // Test bits
15722 
// Set flags from (op1 & op2) without a result register, using TST.  The
// immediate forms require the constant to be encodable as an AArch64
// logical immediate (see the predicates).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit immediate form: TSTW.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register forms: no encodability constraint needed.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15770 
15771 
15772 // Conditional Far Branch
15773 // Conditional Far Branch Unsigned
15774 // TODO: fixme
15775 
15776 // counted loop end branch near
// Back-branch at the end of a counted loop; same encoding as branchCon,
// matched against the CountedLoopEnd ideal node.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
15809 
15810 // counted loop end branch far
15811 // counted loop end branch far unsigned
15812 // TODO: fixme
15813 
15814 // ============================================================================
15815 // inlined locking and unlocking
15816 
// Inline monitor enter: result is communicated through the flags register
// (cr), which the matching If consumes; tmp/tmp2 are scratch.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inline monitor exit, mirror of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
15844 
15845 
15846 // ============================================================================
15847 // Safepoint Instructions
15848 
15849 // TODO
15850 // provide a near and far version of this code
15851 
// Safepoint poll: a load from the polling page; the result is discarded
// (ldrw to zr).  The poll_type relocation lets the VM identify the site.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
15864 
15865 
15866 // ============================================================================
15867 // Procedure Call/Return Instructions
15868 
15869 // Call Java Static Instruction
15870 
// Direct call to a statically-bound Java method, followed by the standard
// call epilogue encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
15886 
15887 // TO HERE
15888 
15889 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (via inline cache), plus call epilogue.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction
// Leaf call: same Java-to-runtime encoding, matched for CallLeaf nodes.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction
// Leaf call that does not use/preserve FP state (CallLeafNoFP).
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
15956 
15957 // Tail Call; Jump from runtime stub to Java code.
15958 // Also known as an 'interprocedural jump'.
15959 // Target of jump will eventually return to caller.
15960 // TailJump below removes the return address.
// Interprocedural jump: register-indirect branch to Java code, with the
// method oop carried in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump for exception forwarding: the exception oop is pinned to r0.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
15986 
15987 // Create exception oop: created by stack-crawling runtime code.
15988 // Created exception is now available to this handler, and is setup
15989 // just prior to jumping to this handler. No code emitted.
15990 // TODO check
15991 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-size pseudo-instruction: merely tells the register allocator the
// exception oop arrives in r0.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16017 
16018 
16019 // Return Instruction
16020 // epilog node loads ret address into lr as part of frame pop
// Method return; lr was loaded by the epilog, so this is a plain RET.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
    // return true
    // NOTE(review): "dpcs1" is the HotSpot assembler's spelling for the
    // trapping debug instruction emitted here — confirm against
    // assembler_aarch64.hpp before renaming.
    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
%}
16047 
16048 // ============================================================================
16049 // Partial Subtype Check
16050 //
16051 // superklass array for an instance of the superklass.  Set a hidden
16052 // internal cache on a hit (cache is checked with exposed code in
16053 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16054 // encoding ALSO sets flags.
16055 
// Slow-path subtype check; fixed registers match the stub's calling
// convention.  opcode(0x1) asks the encoding to zero the result on a hit.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Variant used only for its flags result (compare against null);
// result/temp are clobbered rather than defined.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
16085 
// String.compareTo intrinsic, both strings UTF-16 (UU).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both temps in the format: effect() kills tmp2 as well, and the
  // UL/LU variants already document all of their kills.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16103 
// String.compareTo intrinsic, both strings Latin-1 (LL); no vector temps
// needed since neither side requires inflation.
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both temps in the format: effect() kills tmp2 as well, matching
  // the documentation style of the UL/LU variants.
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16120 
// Mixed-encoding compare (str1 UTF-16, str2 Latin-1): the vector temps
// vtmp1..vtmp3 are passed to string_compare for the inflation work.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mirror case: str1 Latin-1, str2 UTF-16.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister,StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16160 
// String.indexOf intrinsic with a variable-length needle.  The -1 passed
// to string_indexof means "needle length is in cnt2, not a constant".
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1/Latin-1 variant of the above.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// UTF-16 haystack / Latin-1 needle variant.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
       iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16223 
// IndexOf with a small constant-length needle (immI_le_4 restricts the
// constant).  The constant count is passed to string_indexof as icnt2 and
// zr stands in for the unused runtime count and two unused temps.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

// Latin-1/Latin-1 constant-needle variant.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16265 
16266 instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
16267                  immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
16268                  iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
16269 %{
16270   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
16271   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
16272   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
16273          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
16274   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}
16275 
16276   ins_encode %{
16277     int icnt2 = (int)$int_cnt2$$constant;
16278     __ string_indexof($str1$$Register, $str2$$Register,
16279                       $cnt1$$Register, zr,
16280                       $tmp1$$Register, $tmp2$$Register,
16281                       $tmp3$$Register, $tmp4$$Register, zr, zr,
16282                       icnt2, $result$$Register, StrIntrinsicNode::UL);
16283   %}
16284   ins_pipe(pipe_class_memory);
16285 %}
16286 
// indexOf(char) over a UTF-16 char array: searches $cnt1 chars starting at
// $str1 for the char value in $ch; the result index is produced in $result
// by MacroAssembler::string_indexof_char.
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16304 
// String equality over the raw array contents.  The trailing immediate
// passed to string_equals is the element size in bytes: 1 for Latin-1
// (LL) strings, 2 for UTF-16 (UU) strings.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16336 
// Array equality.  The trailing immediate passed to arrays_equals is the
// element size in bytes: 1 for byte[] (LL), 2 for char[] (UU).
// NOTE(review): $tmp (R10) is declared KILL rather than TEMP — presumably
// clobbered unconditionally inside arrays_equals; confirm against the
// MacroAssembler implementation.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 1);
    %}
  ins_pipe(pipe_class_memory);
%}

instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, $tmp$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
16370 
// Matches the HasNegatives intrinsic over a byte array ($ary1, $len);
// the result is computed by MacroAssembler::has_negatives.
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (HasNegatives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "has negatives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16381 
// fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 tmp1, vRegD_V1 tmp2,
                         vRegD_V2 tmp3, vRegD_V3 tmp4,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // NOTE(review): the format mentions "KILL R1, R2, R3, R4" but the effect
  // list clobbers R1 (dst), R2 (src), R3 (len) and the V0-V3 temps only;
  // confirm whether char_array_compress really clobbers R4.
  format %{ "String Compress $src,$dst -> $result    // KILL R1, R2, R3, R4" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $tmp1$$FloatRegister, $tmp2$$FloatRegister,
                           $tmp3$$FloatRegister, $tmp4$$FloatRegister,
                           $result$$Register);
  %}
  ins_pipe( pipe_slow );
%}
16400 
// fast byte[] to char[] inflation
// Universe dummy: StrInflatedCopy produces no register result, only the
// side effect of writing through $dst.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len,
                        vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  // Fixed: the format previously claimed only $tmp1 and $tmp2 are killed,
  // but the effect list also clobbers $tmp3 (V2) and $tmp4 (R3).
  format %{ "String Inflate $src,$dst    // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
  ins_encode %{
    __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                          $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16415 
// encode char[] to byte[] in ISO_8859_1
// $result is produced by MacroAssembler::encode_iso_array; the vector
// temps V0-V3 are declared KILL (clobbered) rather than TEMP here.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
16434 
16435 // ============================================================================
16436 // This name is KNOWN by the ADLC and cannot be changed.
16437 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
16438 // for this guy.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  // Zero cost and zero size: no instruction is emitted because $dst is
  // constrained to thread_RegP — presumably the dedicated thread register,
  // so the value is already where it needs to be.
  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
16453 
16454 // ====================VECTOR INSTRUCTIONS=====================================
16455 
// Vector loads and stores.  The predicates dispatch on the vector's
// memory size in bytes (4/8/16) so each width maps to exactly one rule;
// the encodings are the shared aarch64_enc_{ldr,str}v{S,D,Q} classes.

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
16521 
// Vector replicate (splat): broadcast a scalar GP register or an immediate
// into every lane.  The 64-bit (vecD) forms also accept the shorter vector
// lengths in their predicates (e.g. 4B/8B, 2S/4S) so sub-width vectors
// reuse the same rule; immediate forms mask the constant to lane width.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16681 
// Zero a 128-bit vector register.
// NOTE(review): matches ReplicateI of zero with length()==2 on a vecX —
// presumably how a 2L all-zeros vector is presented to the matcher;
// confirm against the other ReplicateI rules.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  // Fixed: the format previously claimed "movi", but the encoding zeroes
  // the register by EOR-ing it with itself.
  format %{ "eor  $dst, $dst, $dst\t# vector (16B) zero" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
16695 
// Replicate of a floating-point scalar: dup the source SIMD&FP register
// into all lanes of the destination vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
16734 
16735 // ====================REDUCTION ARITHMETIC====================================
16736 
// Add-reduction of a 2-lane int vector: both lanes are moved to GPRs with
// umov, then accumulated with the scalar $src1 using two addw.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduction of a 4-lane int vector: one SIMD addv folds the four
// lanes, then a single umov moves the sum to a GPR for the final addw.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16773 
// Mul-reduction of a 2-lane int vector: each lane is umov'd to a GPR and
// multiplied in.  $dst is also TEMP because it is written before the
// inputs are fully consumed.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Mul-reduction of a 4-lane int vector: ins copies the high 64 bits of
// $src2 over the low half of $tmp, a T2S mulv multiplies lane pairs, then
// the two partial products are umov'd out and combined with scalar muls.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
16817 
// Add-reduction of float vectors.  Lanes are accumulated one at a time in
// lane order with scalar fadds (ins extracts each lane into $tmp),
// preserving a fixed left-to-right accumulation order.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16869 
// Mul-reduction of a 2-lane float vector: lane 0 is multiplied with the
// scalar $src1, then lane 1 (extracted into $tmp via ins) is multiplied in.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed format tag: this is the 2-lane MUL reduction — the tag previously
  // read "add reduction4f", copied from the add variant.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16889 
// Mul-reduction of a 4-lane float vector: lanes 1..3 are each extracted
// into $tmp with ins and multiplied into $dst in lane order.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed format tag: "mul reduction4f" — the tag previously read
  // "add reduction4f", copied from the add variant.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16921 
// Add-reduction of a 2-lane double vector: lane 0 is added to the scalar
// $src1, then lane 1 (extracted into $tmp via ins) is added in.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16941 
// Mul-reduction of a 2-lane double vector: lane 0 is multiplied with the
// scalar $src1, then lane 1 (extracted into $tmp via ins) is multiplied in.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed format tag: "mul reduction2d" — the tag previously read
  // "add reduction2d", copied from the add variant.
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
16961 
16962 // ====================VECTOR ARITHMETIC=======================================
16963 
16964 // --------------------------------- ADD --------------------------------------
16965 
16966 instruct vadd8B(vecD dst, vecD src1, vecD src2)
16967 %{
16968   predicate(n->as_Vector()->length() == 4 ||
16969             n->as_Vector()->length() == 8);
16970   match(Set dst (AddVB src1 src2));
16971   ins_cost(INSN_COST);
16972   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
16973   ins_encode %{
16974     __ addv(as_FloatRegister($dst$$reg), __ T8B,
16975             as_FloatRegister($src1$$reg),
16976             as_FloatRegister($src2$$reg));
16977   %}
16978   ins_pipe(vdop64);
16979 %}
16980 
16981 instruct vadd16B(vecX dst, vecX src1, vecX src2)
16982 %{
16983   predicate(n->as_Vector()->length() == 16);
16984   match(Set dst (AddVB src1 src2));
16985   ins_cost(INSN_COST);
16986   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
16987   ins_encode %{
16988     __ addv(as_FloatRegister($dst$$reg), __ T16B,
16989             as_FloatRegister($src1$$reg),
16990             as_FloatRegister($src2$$reg));
16991   %}
16992   ins_pipe(vdop128);
16993 %}
16994 
// Integer and single-float vector ADD rules.  D-form (64-bit, vecD) rules
// whose predicate lists two lengths accept the shorter vector packed into
// the low half of the D register but always encode the wider arrangement.

// Add vectors of shorts; length 2 or 4 both encode as 4H.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Add 8 shorts (full 128-bit register, 8H arrangement).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Add 2 ints (2S arrangement in a D register).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Add 4 ints (4S arrangement).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Add 2 longs (2D arrangement; integer addv, not fadd).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Add 2 floats (FP add, 2S arrangement).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Add 4 floats (FP add, 4S arrangement).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17093 
// Add 2 doubles (FP add, 2D arrangement).
// Fix: the predicate was missing, so this rule could match an AddVD node of
// any vector length.  Restrict it to two-element vectors, consistent with
// the sibling 2D rules (vsub2D, vmul2D, vdiv2D, vsqrt2D, vabs2D, vneg2D).
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
17106 
17107 // --------------------------------- SUB --------------------------------------
17108 
17109 instruct vsub8B(vecD dst, vecD src1, vecD src2)
17110 %{
17111   predicate(n->as_Vector()->length() == 4 ||
17112             n->as_Vector()->length() == 8);
17113   match(Set dst (SubVB src1 src2));
17114   ins_cost(INSN_COST);
17115   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
17116   ins_encode %{
17117     __ subv(as_FloatRegister($dst$$reg), __ T8B,
17118             as_FloatRegister($src1$$reg),
17119             as_FloatRegister($src2$$reg));
17120   %}
17121   ins_pipe(vdop64);
17122 %}
17123 
17124 instruct vsub16B(vecX dst, vecX src1, vecX src2)
17125 %{
17126   predicate(n->as_Vector()->length() == 16);
17127   match(Set dst (SubVB src1 src2));
17128   ins_cost(INSN_COST);
17129   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
17130   ins_encode %{
17131     __ subv(as_FloatRegister($dst$$reg), __ T16B,
17132             as_FloatRegister($src1$$reg),
17133             as_FloatRegister($src2$$reg));
17134   %}
17135   ins_pipe(vdop128);
17136 %}
17137 
17138 instruct vsub4S(vecD dst, vecD src1, vecD src2)
17139 %{
17140   predicate(n->as_Vector()->length() == 2 ||
17141             n->as_Vector()->length() == 4);
17142   match(Set dst (SubVS src1 src2));
17143   ins_cost(INSN_COST);
17144   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
17145   ins_encode %{
17146     __ subv(as_FloatRegister($dst$$reg), __ T4H,
17147             as_FloatRegister($src1$$reg),
17148             as_FloatRegister($src2$$reg));
17149   %}
17150   ins_pipe(vdop64);
17151 %}
17152 
17153 instruct vsub8S(vecX dst, vecX src1, vecX src2)
17154 %{
17155   predicate(n->as_Vector()->length() == 8);
17156   match(Set dst (SubVS src1 src2));
17157   ins_cost(INSN_COST);
17158   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
17159   ins_encode %{
17160     __ subv(as_FloatRegister($dst$$reg), __ T8H,
17161             as_FloatRegister($src1$$reg),
17162             as_FloatRegister($src2$$reg));
17163   %}
17164   ins_pipe(vdop128);
17165 %}
17166 
17167 instruct vsub2I(vecD dst, vecD src1, vecD src2)
17168 %{
17169   predicate(n->as_Vector()->length() == 2);
17170   match(Set dst (SubVI src1 src2));
17171   ins_cost(INSN_COST);
17172   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
17173   ins_encode %{
17174     __ subv(as_FloatRegister($dst$$reg), __ T2S,
17175             as_FloatRegister($src1$$reg),
17176             as_FloatRegister($src2$$reg));
17177   %}
17178   ins_pipe(vdop64);
17179 %}
17180 
17181 instruct vsub4I(vecX dst, vecX src1, vecX src2)
17182 %{
17183   predicate(n->as_Vector()->length() == 4);
17184   match(Set dst (SubVI src1 src2));
17185   ins_cost(INSN_COST);
17186   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
17187   ins_encode %{
17188     __ subv(as_FloatRegister($dst$$reg), __ T4S,
17189             as_FloatRegister($src1$$reg),
17190             as_FloatRegister($src2$$reg));
17191   %}
17192   ins_pipe(vdop128);
17193 %}
17194 
17195 instruct vsub2L(vecX dst, vecX src1, vecX src2)
17196 %{
17197   predicate(n->as_Vector()->length() == 2);
17198   match(Set dst (SubVL src1 src2));
17199   ins_cost(INSN_COST);
17200   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
17201   ins_encode %{
17202     __ subv(as_FloatRegister($dst$$reg), __ T2D,
17203             as_FloatRegister($src1$$reg),
17204             as_FloatRegister($src2$$reg));
17205   %}
17206   ins_pipe(vdop128);
17207 %}
17208 
17209 instruct vsub2F(vecD dst, vecD src1, vecD src2)
17210 %{
17211   predicate(n->as_Vector()->length() == 2);
17212   match(Set dst (SubVF src1 src2));
17213   ins_cost(INSN_COST);
17214   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
17215   ins_encode %{
17216     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
17217             as_FloatRegister($src1$$reg),
17218             as_FloatRegister($src2$$reg));
17219   %}
17220   ins_pipe(vdop_fp64);
17221 %}
17222 
17223 instruct vsub4F(vecX dst, vecX src1, vecX src2)
17224 %{
17225   predicate(n->as_Vector()->length() == 4);
17226   match(Set dst (SubVF src1 src2));
17227   ins_cost(INSN_COST);
17228   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
17229   ins_encode %{
17230     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
17231             as_FloatRegister($src1$$reg),
17232             as_FloatRegister($src2$$reg));
17233   %}
17234   ins_pipe(vdop_fp128);
17235 %}
17236 
17237 instruct vsub2D(vecX dst, vecX src1, vecX src2)
17238 %{
17239   predicate(n->as_Vector()->length() == 2);
17240   match(Set dst (SubVD src1 src2));
17241   ins_cost(INSN_COST);
17242   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
17243   ins_encode %{
17244     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
17245             as_FloatRegister($src1$$reg),
17246             as_FloatRegister($src2$$reg));
17247   %}
17248   ins_pipe(vdop_fp128);
17249 %}
17250 
17251 // --------------------------------- MUL --------------------------------------
17252 
17253 instruct vmul4S(vecD dst, vecD src1, vecD src2)
17254 %{
17255   predicate(n->as_Vector()->length() == 2 ||
17256             n->as_Vector()->length() == 4);
17257   match(Set dst (MulVS src1 src2));
17258   ins_cost(INSN_COST);
17259   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
17260   ins_encode %{
17261     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
17262             as_FloatRegister($src1$$reg),
17263             as_FloatRegister($src2$$reg));
17264   %}
17265   ins_pipe(vmul64);
17266 %}
17267 
17268 instruct vmul8S(vecX dst, vecX src1, vecX src2)
17269 %{
17270   predicate(n->as_Vector()->length() == 8);
17271   match(Set dst (MulVS src1 src2));
17272   ins_cost(INSN_COST);
17273   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
17274   ins_encode %{
17275     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
17276             as_FloatRegister($src1$$reg),
17277             as_FloatRegister($src2$$reg));
17278   %}
17279   ins_pipe(vmul128);
17280 %}
17281 
17282 instruct vmul2I(vecD dst, vecD src1, vecD src2)
17283 %{
17284   predicate(n->as_Vector()->length() == 2);
17285   match(Set dst (MulVI src1 src2));
17286   ins_cost(INSN_COST);
17287   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
17288   ins_encode %{
17289     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
17290             as_FloatRegister($src1$$reg),
17291             as_FloatRegister($src2$$reg));
17292   %}
17293   ins_pipe(vmul64);
17294 %}
17295 
17296 instruct vmul4I(vecX dst, vecX src1, vecX src2)
17297 %{
17298   predicate(n->as_Vector()->length() == 4);
17299   match(Set dst (MulVI src1 src2));
17300   ins_cost(INSN_COST);
17301   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
17302   ins_encode %{
17303     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
17304             as_FloatRegister($src1$$reg),
17305             as_FloatRegister($src2$$reg));
17306   %}
17307   ins_pipe(vmul128);
17308 %}
17309 
17310 instruct vmul2F(vecD dst, vecD src1, vecD src2)
17311 %{
17312   predicate(n->as_Vector()->length() == 2);
17313   match(Set dst (MulVF src1 src2));
17314   ins_cost(INSN_COST);
17315   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
17316   ins_encode %{
17317     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
17318             as_FloatRegister($src1$$reg),
17319             as_FloatRegister($src2$$reg));
17320   %}
17321   ins_pipe(vmuldiv_fp64);
17322 %}
17323 
17324 instruct vmul4F(vecX dst, vecX src1, vecX src2)
17325 %{
17326   predicate(n->as_Vector()->length() == 4);
17327   match(Set dst (MulVF src1 src2));
17328   ins_cost(INSN_COST);
17329   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
17330   ins_encode %{
17331     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
17332             as_FloatRegister($src1$$reg),
17333             as_FloatRegister($src2$$reg));
17334   %}
17335   ins_pipe(vmuldiv_fp128);
17336 %}
17337 
17338 instruct vmul2D(vecX dst, vecX src1, vecX src2)
17339 %{
17340   predicate(n->as_Vector()->length() == 2);
17341   match(Set dst (MulVD src1 src2));
17342   ins_cost(INSN_COST);
17343   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
17344   ins_encode %{
17345     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
17346             as_FloatRegister($src1$$reg),
17347             as_FloatRegister($src2$$reg));
17348   %}
17349   ins_pipe(vmuldiv_fp128);
17350 %}
17351 
17352 // --------------------------------- MLA --------------------------------------
17353 
17354 instruct vmla4S(vecD dst, vecD src1, vecD src2)
17355 %{
17356   predicate(n->as_Vector()->length() == 2 ||
17357             n->as_Vector()->length() == 4);
17358   match(Set dst (AddVS dst (MulVS src1 src2)));
17359   ins_cost(INSN_COST);
17360   format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
17361   ins_encode %{
17362     __ mlav(as_FloatRegister($dst$$reg), __ T4H,
17363             as_FloatRegister($src1$$reg),
17364             as_FloatRegister($src2$$reg));
17365   %}
17366   ins_pipe(vmla64);
17367 %}
17368 
17369 instruct vmla8S(vecX dst, vecX src1, vecX src2)
17370 %{
17371   predicate(n->as_Vector()->length() == 8);
17372   match(Set dst (AddVS dst (MulVS src1 src2)));
17373   ins_cost(INSN_COST);
17374   format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
17375   ins_encode %{
17376     __ mlav(as_FloatRegister($dst$$reg), __ T8H,
17377             as_FloatRegister($src1$$reg),
17378             as_FloatRegister($src2$$reg));
17379   %}
17380   ins_pipe(vmla128);
17381 %}
17382 
17383 instruct vmla2I(vecD dst, vecD src1, vecD src2)
17384 %{
17385   predicate(n->as_Vector()->length() == 2);
17386   match(Set dst (AddVI dst (MulVI src1 src2)));
17387   ins_cost(INSN_COST);
17388   format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
17389   ins_encode %{
17390     __ mlav(as_FloatRegister($dst$$reg), __ T2S,
17391             as_FloatRegister($src1$$reg),
17392             as_FloatRegister($src2$$reg));
17393   %}
17394   ins_pipe(vmla64);
17395 %}
17396 
17397 instruct vmla4I(vecX dst, vecX src1, vecX src2)
17398 %{
17399   predicate(n->as_Vector()->length() == 4);
17400   match(Set dst (AddVI dst (MulVI src1 src2)));
17401   ins_cost(INSN_COST);
17402   format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
17403   ins_encode %{
17404     __ mlav(as_FloatRegister($dst$$reg), __ T4S,
17405             as_FloatRegister($src1$$reg),
17406             as_FloatRegister($src2$$reg));
17407   %}
17408   ins_pipe(vmla128);
17409 %}
17410 
17411 // dst + src1 * src2
17412 instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
17413   predicate(UseFMA && n->as_Vector()->length() == 2);
17414   match(Set dst (FmaVF  dst (Binary src1 src2)));
17415   format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
17416   ins_cost(INSN_COST);
17417   ins_encode %{
17418     __ fmla(as_FloatRegister($dst$$reg), __ T2S,
17419             as_FloatRegister($src1$$reg),
17420             as_FloatRegister($src2$$reg));
17421   %}
17422   ins_pipe(vmuldiv_fp64);
17423 %}
17424 
17425 // dst + src1 * src2
17426 instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
17427   predicate(UseFMA && n->as_Vector()->length() == 4);
17428   match(Set dst (FmaVF  dst (Binary src1 src2)));
17429   format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
17430   ins_cost(INSN_COST);
17431   ins_encode %{
17432     __ fmla(as_FloatRegister($dst$$reg), __ T4S,
17433             as_FloatRegister($src1$$reg),
17434             as_FloatRegister($src2$$reg));
17435   %}
17436   ins_pipe(vmuldiv_fp128);
17437 %}
17438 
17439 // dst + src1 * src2
17440 instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
17441   predicate(UseFMA && n->as_Vector()->length() == 2);
17442   match(Set dst (FmaVD  dst (Binary src1 src2)));
17443   format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
17444   ins_cost(INSN_COST);
17445   ins_encode %{
17446     __ fmla(as_FloatRegister($dst$$reg), __ T2D,
17447             as_FloatRegister($src1$$reg),
17448             as_FloatRegister($src2$$reg));
17449   %}
17450   ins_pipe(vmuldiv_fp128);
17451 %}
17452 
17453 // --------------------------------- MLS --------------------------------------
17454 
17455 instruct vmls4S(vecD dst, vecD src1, vecD src2)
17456 %{
17457   predicate(n->as_Vector()->length() == 2 ||
17458             n->as_Vector()->length() == 4);
17459   match(Set dst (SubVS dst (MulVS src1 src2)));
17460   ins_cost(INSN_COST);
17461   format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
17462   ins_encode %{
17463     __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
17464             as_FloatRegister($src1$$reg),
17465             as_FloatRegister($src2$$reg));
17466   %}
17467   ins_pipe(vmla64);
17468 %}
17469 
17470 instruct vmls8S(vecX dst, vecX src1, vecX src2)
17471 %{
17472   predicate(n->as_Vector()->length() == 8);
17473   match(Set dst (SubVS dst (MulVS src1 src2)));
17474   ins_cost(INSN_COST);
17475   format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
17476   ins_encode %{
17477     __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
17478             as_FloatRegister($src1$$reg),
17479             as_FloatRegister($src2$$reg));
17480   %}
17481   ins_pipe(vmla128);
17482 %}
17483 
17484 instruct vmls2I(vecD dst, vecD src1, vecD src2)
17485 %{
17486   predicate(n->as_Vector()->length() == 2);
17487   match(Set dst (SubVI dst (MulVI src1 src2)));
17488   ins_cost(INSN_COST);
17489   format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
17490   ins_encode %{
17491     __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
17492             as_FloatRegister($src1$$reg),
17493             as_FloatRegister($src2$$reg));
17494   %}
17495   ins_pipe(vmla64);
17496 %}
17497 
17498 instruct vmls4I(vecX dst, vecX src1, vecX src2)
17499 %{
17500   predicate(n->as_Vector()->length() == 4);
17501   match(Set dst (SubVI dst (MulVI src1 src2)));
17502   ins_cost(INSN_COST);
17503   format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
17504   ins_encode %{
17505     __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
17506             as_FloatRegister($src1$$reg),
17507             as_FloatRegister($src2$$reg));
17508   %}
17509   ins_pipe(vmla128);
17510 %}
17511 
17512 // dst - src1 * src2
17513 instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
17514   predicate(UseFMA && n->as_Vector()->length() == 2);
17515   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
17516   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
17517   format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
17518   ins_cost(INSN_COST);
17519   ins_encode %{
17520     __ fmls(as_FloatRegister($dst$$reg), __ T2S,
17521             as_FloatRegister($src1$$reg),
17522             as_FloatRegister($src2$$reg));
17523   %}
17524   ins_pipe(vmuldiv_fp64);
17525 %}
17526 
17527 // dst - src1 * src2
17528 instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
17529   predicate(UseFMA && n->as_Vector()->length() == 4);
17530   match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
17531   match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
17532   format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
17533   ins_cost(INSN_COST);
17534   ins_encode %{
17535     __ fmls(as_FloatRegister($dst$$reg), __ T4S,
17536             as_FloatRegister($src1$$reg),
17537             as_FloatRegister($src2$$reg));
17538   %}
17539   ins_pipe(vmuldiv_fp128);
17540 %}
17541 
17542 // dst - src1 * src2
17543 instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
17544   predicate(UseFMA && n->as_Vector()->length() == 2);
17545   match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
17546   match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
17547   format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
17548   ins_cost(INSN_COST);
17549   ins_encode %{
17550     __ fmls(as_FloatRegister($dst$$reg), __ T2D,
17551             as_FloatRegister($src1$$reg),
17552             as_FloatRegister($src2$$reg));
17553   %}
17554   ins_pipe(vmuldiv_fp128);
17555 %}
17556 
17557 // --------------------------------- DIV --------------------------------------
17558 
17559 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
17560 %{
17561   predicate(n->as_Vector()->length() == 2);
17562   match(Set dst (DivVF src1 src2));
17563   ins_cost(INSN_COST);
17564   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
17565   ins_encode %{
17566     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
17567             as_FloatRegister($src1$$reg),
17568             as_FloatRegister($src2$$reg));
17569   %}
17570   ins_pipe(vmuldiv_fp64);
17571 %}
17572 
17573 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
17574 %{
17575   predicate(n->as_Vector()->length() == 4);
17576   match(Set dst (DivVF src1 src2));
17577   ins_cost(INSN_COST);
17578   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
17579   ins_encode %{
17580     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
17581             as_FloatRegister($src1$$reg),
17582             as_FloatRegister($src2$$reg));
17583   %}
17584   ins_pipe(vmuldiv_fp128);
17585 %}
17586 
17587 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
17588 %{
17589   predicate(n->as_Vector()->length() == 2);
17590   match(Set dst (DivVD src1 src2));
17591   ins_cost(INSN_COST);
17592   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
17593   ins_encode %{
17594     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
17595             as_FloatRegister($src1$$reg),
17596             as_FloatRegister($src2$$reg));
17597   %}
17598   ins_pipe(vmuldiv_fp128);
17599 %}
17600 
17601 // --------------------------------- SQRT -------------------------------------
17602 
17603 instruct vsqrt2D(vecX dst, vecX src)
17604 %{
17605   predicate(n->as_Vector()->length() == 2);
17606   match(Set dst (SqrtVD src));
17607   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
17608   ins_encode %{
17609     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
17610              as_FloatRegister($src$$reg));
17611   %}
17612   ins_pipe(vsqrt_fp128);
17613 %}
17614 
17615 // --------------------------------- ABS --------------------------------------
17616 
17617 instruct vabs2F(vecD dst, vecD src)
17618 %{
17619   predicate(n->as_Vector()->length() == 2);
17620   match(Set dst (AbsVF src));
17621   ins_cost(INSN_COST * 3);
17622   format %{ "fabs  $dst,$src\t# vector (2S)" %}
17623   ins_encode %{
17624     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
17625             as_FloatRegister($src$$reg));
17626   %}
17627   ins_pipe(vunop_fp64);
17628 %}
17629 
17630 instruct vabs4F(vecX dst, vecX src)
17631 %{
17632   predicate(n->as_Vector()->length() == 4);
17633   match(Set dst (AbsVF src));
17634   ins_cost(INSN_COST * 3);
17635   format %{ "fabs  $dst,$src\t# vector (4S)" %}
17636   ins_encode %{
17637     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
17638             as_FloatRegister($src$$reg));
17639   %}
17640   ins_pipe(vunop_fp128);
17641 %}
17642 
17643 instruct vabs2D(vecX dst, vecX src)
17644 %{
17645   predicate(n->as_Vector()->length() == 2);
17646   match(Set dst (AbsVD src));
17647   ins_cost(INSN_COST * 3);
17648   format %{ "fabs  $dst,$src\t# vector (2D)" %}
17649   ins_encode %{
17650     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
17651             as_FloatRegister($src$$reg));
17652   %}
17653   ins_pipe(vunop_fp128);
17654 %}
17655 
17656 // --------------------------------- NEG --------------------------------------
17657 
17658 instruct vneg2F(vecD dst, vecD src)
17659 %{
17660   predicate(n->as_Vector()->length() == 2);
17661   match(Set dst (NegVF src));
17662   ins_cost(INSN_COST * 3);
17663   format %{ "fneg  $dst,$src\t# vector (2S)" %}
17664   ins_encode %{
17665     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
17666             as_FloatRegister($src$$reg));
17667   %}
17668   ins_pipe(vunop_fp64);
17669 %}
17670 
17671 instruct vneg4F(vecX dst, vecX src)
17672 %{
17673   predicate(n->as_Vector()->length() == 4);
17674   match(Set dst (NegVF src));
17675   ins_cost(INSN_COST * 3);
17676   format %{ "fneg  $dst,$src\t# vector (4S)" %}
17677   ins_encode %{
17678     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
17679             as_FloatRegister($src$$reg));
17680   %}
17681   ins_pipe(vunop_fp128);
17682 %}
17683 
17684 instruct vneg2D(vecX dst, vecX src)
17685 %{
17686   predicate(n->as_Vector()->length() == 2);
17687   match(Set dst (NegVD src));
17688   ins_cost(INSN_COST * 3);
17689   format %{ "fneg  $dst,$src\t# vector (2D)" %}
17690   ins_encode %{
17691     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
17692             as_FloatRegister($src$$reg));
17693   %}
17694   ins_pipe(vunop_fp128);
17695 %}
17696 
17697 // --------------------------------- AND --------------------------------------
17698 
17699 instruct vand8B(vecD dst, vecD src1, vecD src2)
17700 %{
17701   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17702             n->as_Vector()->length_in_bytes() == 8);
17703   match(Set dst (AndV src1 src2));
17704   ins_cost(INSN_COST);
17705   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
17706   ins_encode %{
17707     __ andr(as_FloatRegister($dst$$reg), __ T8B,
17708             as_FloatRegister($src1$$reg),
17709             as_FloatRegister($src2$$reg));
17710   %}
17711   ins_pipe(vlogical64);
17712 %}
17713 
17714 instruct vand16B(vecX dst, vecX src1, vecX src2)
17715 %{
17716   predicate(n->as_Vector()->length_in_bytes() == 16);
17717   match(Set dst (AndV src1 src2));
17718   ins_cost(INSN_COST);
17719   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
17720   ins_encode %{
17721     __ andr(as_FloatRegister($dst$$reg), __ T16B,
17722             as_FloatRegister($src1$$reg),
17723             as_FloatRegister($src2$$reg));
17724   %}
17725   ins_pipe(vlogical128);
17726 %}
17727 
17728 // --------------------------------- OR ---------------------------------------
17729 
// OR of 4- or 8-byte vectors (8B arrangement).  Like AND/XOR, bitwise OR is
// element-size agnostic, so the predicate keys on length_in_bytes.
// Fix: the format string previously said "and", misdescribing the emitted
// ORR instruction in PrintOptoAssembly output; it now matches vor16B.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
17744 
// OR of 16-byte vectors (16B arrangement).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
17758 
17759 // --------------------------------- XOR --------------------------------------
17760 
17761 instruct vxor8B(vecD dst, vecD src1, vecD src2)
17762 %{
17763   predicate(n->as_Vector()->length_in_bytes() == 4 ||
17764             n->as_Vector()->length_in_bytes() == 8);
17765   match(Set dst (XorV src1 src2));
17766   ins_cost(INSN_COST);
17767   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
17768   ins_encode %{
17769     __ eor(as_FloatRegister($dst$$reg), __ T8B,
17770             as_FloatRegister($src1$$reg),
17771             as_FloatRegister($src2$$reg));
17772   %}
17773   ins_pipe(vlogical64);
17774 %}
17775 
17776 instruct vxor16B(vecX dst, vecX src1, vecX src2)
17777 %{
17778   predicate(n->as_Vector()->length_in_bytes() == 16);
17779   match(Set dst (XorV src1 src2));
17780   ins_cost(INSN_COST);
17781   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
17782   ins_encode %{
17783     __ eor(as_FloatRegister($dst$$reg), __ T16B,
17784             as_FloatRegister($src1$$reg),
17785             as_FloatRegister($src2$$reg));
17786   %}
17787   ins_pipe(vlogical128);
17788 %}
17789 
17790 // ------------------------------ Shift ---------------------------------------
17791 
17792 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
17793   match(Set dst (LShiftCntV cnt));
17794   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
17795   ins_encode %{
17796     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17797   %}
17798   ins_pipe(vdup_reg_reg128);
17799 %}
17800 
17801 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
17802 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
17803   match(Set dst (RShiftCntV cnt));
17804   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
17805   ins_encode %{
17806     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
17807     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
17808   %}
17809   ins_pipe(vdup_reg_reg128);
17810 %}
17811 
17812 instruct vsll8B(vecD dst, vecD src, vecX shift) %{
17813   predicate(n->as_Vector()->length() == 4 ||
17814             n->as_Vector()->length() == 8);
17815   match(Set dst (LShiftVB src shift));
17816   match(Set dst (RShiftVB src shift));
17817   ins_cost(INSN_COST);
17818   format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
17819   ins_encode %{
17820     __ sshl(as_FloatRegister($dst$$reg), __ T8B,
17821             as_FloatRegister($src$$reg),
17822             as_FloatRegister($shift$$reg));
17823   %}
17824   ins_pipe(vshift64);
17825 %}
17826 
17827 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
17828   predicate(n->as_Vector()->length() == 16);
17829   match(Set dst (LShiftVB src shift));
17830   match(Set dst (RShiftVB src shift));
17831   ins_cost(INSN_COST);
17832   format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
17833   ins_encode %{
17834     __ sshl(as_FloatRegister($dst$$reg), __ T16B,
17835             as_FloatRegister($src$$reg),
17836             as_FloatRegister($shift$$reg));
17837   %}
17838   ins_pipe(vshift128);
17839 %}
17840 
17841 instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
17842   predicate(n->as_Vector()->length() == 4 ||
17843             n->as_Vector()->length() == 8);
17844   match(Set dst (URShiftVB src shift));
17845   ins_cost(INSN_COST);
17846   format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
17847   ins_encode %{
17848     __ ushl(as_FloatRegister($dst$$reg), __ T8B,
17849             as_FloatRegister($src$$reg),
17850             as_FloatRegister($shift$$reg));
17851   %}
17852   ins_pipe(vshift64);
17853 %}
17854 
17855 instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
17856   predicate(n->as_Vector()->length() == 16);
17857   match(Set dst (URShiftVB src shift));
17858   ins_cost(INSN_COST);
17859   format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
17860   ins_encode %{
17861     __ ushl(as_FloatRegister($dst$$reg), __ T16B,
17862             as_FloatRegister($src$$reg),
17863             as_FloatRegister($shift$$reg));
17864   %}
17865   ins_pipe(vshift128);
17866 %}
17867 
17868 instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
17869   predicate(n->as_Vector()->length() == 4 ||
17870             n->as_Vector()->length() == 8);
17871   match(Set dst (LShiftVB src shift));
17872   ins_cost(INSN_COST);
17873   format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
17874   ins_encode %{
17875     int sh = (int)$shift$$constant;
17876     if (sh >= 8) {
17877       __ eor(as_FloatRegister($dst$$reg), __ T8B,
17878              as_FloatRegister($src$$reg),
17879              as_FloatRegister($src$$reg));
17880     } else {
17881       __ shl(as_FloatRegister($dst$$reg), __ T8B,
17882              as_FloatRegister($src$$reg), sh);
17883     }
17884   %}
17885   ins_pipe(vshift64_imm);
17886 %}
17887 
17888 instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
17889   predicate(n->as_Vector()->length() == 16);
17890   match(Set dst (LShiftVB src shift));
17891   ins_cost(INSN_COST);
17892   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
17893   ins_encode %{
17894     int sh = (int)$shift$$constant;
17895     if (sh >= 8) {
17896       __ eor(as_FloatRegister($dst$$reg), __ T16B,
17897              as_FloatRegister($src$$reg),
17898              as_FloatRegister($src$$reg));
17899     } else {
17900       __ shl(as_FloatRegister($dst$$reg), __ T16B,
17901              as_FloatRegister($src$$reg), sh);
17902     }
17903   %}
17904   ins_pipe(vshift128_imm);
17905 %}
17906 
17907 instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
17908   predicate(n->as_Vector()->length() == 4 ||
17909             n->as_Vector()->length() == 8);
17910   match(Set dst (RShiftVB src shift));
17911   ins_cost(INSN_COST);
17912   format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
17913   ins_encode %{
17914     int sh = (int)$shift$$constant;
17915     if (sh >= 8) sh = 7;
17916     __ sshr(as_FloatRegister($dst$$reg), __ T8B,
17917            as_FloatRegister($src$$reg), sh);
17918   %}
17919   ins_pipe(vshift64_imm);
17920 %}
17921 
17922 instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
17923   predicate(n->as_Vector()->length() == 16);
17924   match(Set dst (RShiftVB src shift));
17925   ins_cost(INSN_COST);
17926   format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
17927   ins_encode %{
17928     int sh = (int)$shift$$constant;
17929     if (sh >= 8) sh = 7;
17930     __ sshr(as_FloatRegister($dst$$reg), __ T16B,
17931            as_FloatRegister($src$$reg), sh);
17932   %}
17933   ins_pipe(vshift128_imm);
17934 %}
17935 
// Logical (unsigned) right shift of a 4B/8B byte vector by an immediate.
// The 4-byte case is executed in the low half of an 8B register.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // A logical shift by >= the 8-bit element width yields zero;
      // eor of src with itself materializes that zero in dst.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
17955 
// Logical (unsigned) right shift of a 16B byte vector by an immediate.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 8) {
      // Shift >= element width: result is all zeros, produced via eor(src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
17974 
// Variable shift of a 2H/4H short vector; matches both left and arithmetic
// right shift. sshl shifts left for positive per-lane counts and right for
// negative ones, so the RShiftVS match presumably relies on the shift-count
// vector being negated upstream — NOTE(review): confirm against the
// shift-count generation rules.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
17989 
// Variable shift of an 8H short vector; matches both left and arithmetic
// right shift (sshl shifts right for negative per-lane counts — presumably
// negated upstream for RShiftVS; confirm against the shift-count rules).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18003 
// Variable logical (unsigned) right shift of a 2H/4H short vector. ushl
// shifts right for negative per-lane counts — presumably the shift-count
// vector is negated upstream; confirm against the shift-count rules.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
18017 
// Variable logical (unsigned) right shift of an 8H short vector (ushl with
// per-lane counts; negative counts shift right — presumably negated upstream).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18030 
// Left shift of a 2H/4H short vector by an immediate.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= the 16-bit element width: result is all zeros,
      // produced via eor(src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
18050 
// Left shift of an 8H short vector by an immediate.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= element width: result is all zeros, produced via eor(src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18069 
// Arithmetic (signed) right shift of a 2H/4H short vector by an immediate.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp counts >= 16 to 15: shifting a 16-bit lane arithmetically by 15
    // already yields the all-sign-bits result.
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
18084 
// Arithmetic (signed) right shift of an 8H short vector by an immediate.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    // Clamp counts >= 16 to 15 (all-sign-bits result is unchanged).
    if (sh >= 16) sh = 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
18098 
// Logical (unsigned) right shift of a 2H/4H short vector by an immediate.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= element width: result is all zeros, produced via eor(src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
18118 
// Logical (unsigned) right shift of an 8H short vector by an immediate.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant;
    if (sh >= 16) {
      // Shift >= element width: result is all zeros, produced via eor(src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
18137 
// Variable shift of a 2S int vector; matches both left and arithmetic right
// shift (sshl shifts right for negative per-lane counts — presumably negated
// upstream for RShiftVI; confirm against the shift-count rules).
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
18151 
// Variable shift of a 4S int vector; matches both left and arithmetic right
// shift (right shifts presumably use a negated count vector — confirm).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18165 
// Variable logical (unsigned) right shift of a 2S int vector (ushl with
// per-lane counts; negative counts shift right — presumably negated upstream).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
18178 
// Variable logical (unsigned) right shift of a 4S int vector.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18191 
// Left shift of a 2S int vector by an immediate. No clamping here, unlike
// the byte/short forms — assumes the shift constant is already within the
// 0..31 range for 32-bit lanes; TODO(review) confirm C2 masks it upstream.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18204 
// Left shift of a 4S int vector by an immediate (no clamping — assumes the
// constant is already in the 0..31 range for 32-bit lanes).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18217 
// Arithmetic (signed) right shift of a 2S int vector by an immediate.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18230 
// Arithmetic (signed) right shift of a 4S int vector by an immediate.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18243 
// Logical (unsigned) right shift of a 2S int vector by an immediate.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift64_imm);
%}
18256 
// Logical (unsigned) right shift of a 4S int vector by an immediate.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18269 
// Variable shift of a 2D long vector; matches both left and arithmetic right
// shift (sshl shifts right for negative per-lane counts — presumably negated
// upstream for RShiftVL; confirm against the shift-count rules).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18283 
// Variable logical (unsigned) right shift of a 2D long vector (ushl with
// per-lane counts; negative counts shift right — presumably negated upstream).
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
18296 
// Left shift of a 2D long vector by an immediate (no clamping — assumes the
// constant is already in the 0..63 range for 64-bit lanes).
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18309 
// Arithmetic (signed) right shift of a 2D long vector by an immediate.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18322 
// Logical (unsigned) right shift of a 2D long vector by an immediate.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            (int)$shift$$constant);
  %}
  ins_pipe(vshift128_imm);
%}
18335 
18336 //----------PEEPHOLE RULES-----------------------------------------------------
18337 // These must follow all instruction definitions as they use the names
18338 // defined in the instructions definitions.
18339 //
18340 // peepmatch ( root_instr_name [preceding_instruction]* );
18341 //
18342 // peepconstraint %{
18343 // (instruction_number.operand_name relational_op instruction_number.operand_name
18344 //  [, ...] );
18345 // // instruction numbers are zero-based using left to right order in peepmatch
18346 //
18347 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
18348 // // provide an instruction_number.operand_name for each operand that appears
18349 // // in the replacement instruction's match rule
18350 //
18351 // ---------VM FLAGS---------------------------------------------------------
18352 //
18353 // All peephole optimizations can be turned off using -XX:-OptoPeephole
18354 //
18355 // Each peephole rule is given an identifying number starting with zero and
18356 // increasing by one in the order seen by the parser.  An individual peephole
18357 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
18358 // on the command-line.
18359 //
18360 // ---------CURRENT LIMITATIONS----------------------------------------------
18361 //
18362 // Only match adjacent instructions in same basic block
18363 // Only equality constraints
18364 // Only constraints between operands, not (0.dest_reg == RAX_enc)
18365 // Only one replacement instruction
18366 //
18367 // ---------EXAMPLE----------------------------------------------------------
18368 //
18369 // // pertinent parts of existing instructions in architecture description
18370 // instruct movI(iRegINoSp dst, iRegI src)
18371 // %{
18372 //   match(Set dst (CopyI src));
18373 // %}
18374 //
18375 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
18376 // %{
18377 //   match(Set dst (AddI dst src));
18378 //   effect(KILL cr);
18379 // %}
18380 //
18381 // // Change (inc mov) to lea
18382 // peephole %{
//   // increment preceded by register-register move
18384 //   peepmatch ( incI_iReg movI );
18385 //   // require that the destination register of the increment
18386 //   // match the destination register of the move
18387 //   peepconstraint ( 0.dst == 1.dst );
18388 //   // construct a replacement instruction that sets
18389 //   // the destination to ( move's source register + one )
18390 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
18391 // %}
18392 //
18393 
18394 // Implementation no longer uses movX instructions since
18395 // machine-independent system no longer uses CopyX nodes.
18396 //
18397 // peephole
18398 // %{
18399 //   peepmatch (incI_iReg movI);
18400 //   peepconstraint (0.dst == 1.dst);
18401 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18402 // %}
18403 
18404 // peephole
18405 // %{
18406 //   peepmatch (decI_iReg movI);
18407 //   peepconstraint (0.dst == 1.dst);
18408 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18409 // %}
18410 
18411 // peephole
18412 // %{
18413 //   peepmatch (addI_iReg_imm movI);
18414 //   peepconstraint (0.dst == 1.dst);
18415 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
18416 // %}
18417 
18418 // peephole
18419 // %{
18420 //   peepmatch (incL_iReg movL);
18421 //   peepconstraint (0.dst == 1.dst);
18422 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18423 // %}
18424 
18425 // peephole
18426 // %{
18427 //   peepmatch (decL_iReg movL);
18428 //   peepconstraint (0.dst == 1.dst);
18429 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18430 // %}
18431 
18432 // peephole
18433 // %{
18434 //   peepmatch (addL_iReg_imm movL);
18435 //   peepconstraint (0.dst == 1.dst);
18436 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
18437 // %}
18438 
18439 // peephole
18440 // %{
18441 //   peepmatch (addP_iReg_imm movP);
18442 //   peepconstraint (0.dst == 1.dst);
18443 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
18444 // %}
18445 
18446 // // Change load of spilled value to only a spill
18447 // instruct storeI(memory mem, iRegI src)
18448 // %{
18449 //   match(Set mem (StoreI mem src));
18450 // %}
18451 //
18452 // instruct loadI(iRegINoSp dst, memory mem)
18453 // %{
18454 //   match(Set dst (LoadI mem));
18455 // %}
18456 //
18457 
18458 //----------SMARTSPILL RULES---------------------------------------------------
18459 // These must follow all instruction definitions as they use the names
18460 // defined in the instructions definitions.
18461 
18462 // Local Variables:
18463 // mode: c++
18464 // End: