1 //
   2 // Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// As regards Java usage, we don't use any callee-save registers
// because that makes it difficult to de-optimise a frame (see the
// comment in the x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// Integer registers r0-r31.  Each 64-bit register is described to the
// allocator as a real lower 32-bit half plus a virtual upper half (_H)
// -- see the note above.  The first column is the register save type,
// the second the C-convention save type (see the key at the top).
//
// r0-r7: argument/result registers -- volatile (SOC) for Java and C.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r10-r18: temporaries -- volatile (SOC) for Java and C.  (r8-r9 are
// deliberately not defined at all; see the note above.)
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: callee-saved (SOE) in the C convention but treated as
// volatile (SOC) for Java code -- see the de-optimisation note above.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: reserved/system registers -- never allocated for Java use.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
// AArch64 has 32 floating-point registers. Each is 128 bits wide and
// can store a vector of single or double precision floating-point
// values: up to 4 * 32 bit floats or 2 * 64 bit doubles.  We currently
// only use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // SIMD/FP registers v0-v31.  Each register is presented to the
  // allocator as four adjacent 32-bit slices: Vn is the lowest word,
  // and Vn_H, Vn_J, Vn_K are the successive words (VMReg next(),
  // next(2), next(3)).  All are SOC for Java use (see note above).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation priority for the integer registers, highest first:
// volatile temporaries, then argument registers, then C-ABI-saved
// registers, with the non-allocatable system registers last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation priority for the float/SIMD registers, highest first:
// the no-save registers v16-v31, then argument registers v0-v7, then
// the C-ABI-saved registers v8-v15.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
 /* R31 */                      // sp is deliberately excluded (see above)
);
 471 
// Singleton classes pin an operand to one specific 32-bit register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,                 // heapbase
    R28, R28_H,                 // thread
    R29, R29_H,                 // fp
    R30, R30_H,                 // lr
    R31, R31_H                  // sp -- included in this class
);
 517 
 518 // Class for all non-special integer registers
// Variant without R29 (fp); no_special_reg32 below chooses between the
// _no_fp and _with_fp variants based on PreserveFramePointer.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// Same as no_special_reg32_no_fp but with R29 (fp) allocatable.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
// 64-bit counterpart of no_special_reg32_no_fp: excludes R29 (fp); see
// no_special_reg below for the PreserveFramePointer selection.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// Same as no_special_reg_no_fp but with R29 (fp) allocatable.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// --------------------------------------------------------------------
// Singleton classes pinning an operand to one specific 64-bit register
// (low word plus virtual high word).
// --------------------------------------------------------------------

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
 726 // Class for all pointer registers
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,                 // heapbase
    R28, R28_H,                 // thread
    R29, R29_H,                 // fp
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
 759 
 760 // Class for all non_special pointer registers
// Like ptr_reg but without the reserved registers r27-r31 (left
// commented out below to make the exclusions explicit).
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
 794 // Class for all float registers
// The single-precision view uses only the low 32-bit slice (Vn) of
// each register.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
// Each entry pairs the low word (Vn) with its virtual high half (Vn_H).
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
 868 // Class for all 64bit vector registers
// 64-bit vectors use the same two 32-bit slices (Vn, Vn_H) as doubles.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// the additional _J and _K slots extend each register's allocator
// mask to cover the full 128 bits (four 32-bit slots per register).
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// n.b. only the V0/V0_H (low 64-bit) slots are listed -- TODO(review):
// confirm the _J/_K slots are deliberately omitted (cf. vectorx_reg)
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// n.b. only the V1/V1_H (low 64-bit) slots are listed
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// n.b. only the V2/V2_H (low 64-bit) slots are listed
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// n.b. only the V3/V3_H (low 64-bit) slots are listed
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes -- a single-register class
// containing just the flags register
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches rank twice as expensive as a plain instruction . . .
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // . . . as do calls
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references (acquiring/releasing memory ops) are ranked
  // well above ordinary register and memory operations
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // both answers below are zero because this platform plants no call
  // trampoline stubs at all.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
class HandlerImpl {

 public:

  // code emitters for the exception and deopt handler stubs
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // n.b. budgets 4 instruction words in total -- presumably the far
    // branch can expand to several instructions; TODO(review) confirm
    // against MacroAssembler::far_branch_size()
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // helpers which navigate between the leading, card mark and
  // trailing membars of a volatile put or CAS subgraph
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1066 %}
1067 
1068 source %{
1069 
  // Optimization of volatile gets and puts
1071   // -------------------------------------
1072   //
1073   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1074   // use to implement volatile reads and writes. For a volatile read
1075   // we simply need
1076   //
1077   //   ldar<x>
1078   //
1079   // and for a volatile write we need
1080   //
1081   //   stlr<x>
1082   //
1083   // Alternatively, we can implement them by pairing a normal
1084   // load/store with a memory barrier. For a volatile read we need
1085   //
1086   //   ldr<x>
1087   //   dmb ishld
1088   //
1089   // for a volatile write
1090   //
1091   //   dmb ish
1092   //   str<x>
1093   //   dmb ish
1094   //
1095   // We can also use ldaxr and stlxr to implement compare and swap CAS
1096   // sequences. These are normally translated to an instruction
1097   // sequence like the following
1098   //
1099   //   dmb      ish
1100   // retry:
1101   //   ldxr<x>   rval raddr
1102   //   cmp       rval rold
1103   //   b.ne done
1104   //   stlxr<x>  rval, rnew, rold
1105   //   cbnz      rval retry
1106   // done:
1107   //   cset      r0, eq
1108   //   dmb ishld
1109   //
1110   // Note that the exclusive store is already using an stlxr
1111   // instruction. That is required to ensure visibility to other
1112   // threads of the exclusive write (assuming it succeeds) before that
1113   // of any subsequent writes.
1114   //
1115   // The following instruction sequence is an improvement on the above
1116   //
1117   // retry:
1118   //   ldaxr<x>  rval raddr
1119   //   cmp       rval rold
1120   //   b.ne done
1121   //   stlxr<x>  rval, rnew, rold
1122   //   cbnz      rval retry
1123   // done:
1124   //   cset      r0, eq
1125   //
1126   // We don't need the leading dmb ish since the stlxr guarantees
1127   // visibility of prior writes in the case that the swap is
1128   // successful. Crucially we don't have to worry about the case where
1129   // the swap is not successful since no valid program should be
1130   // relying on visibility of prior changes by the attempting thread
1131   // in the case where the CAS fails.
1132   //
1133   // Similarly, we don't need the trailing dmb ishld if we substitute
1134   // an ldaxr instruction since that will provide all the guarantees we
1135   // require regarding observation of changes made by other threads
1136   // before any change to the CAS address observed by the load.
1137   //
1138   // In order to generate the desired instruction sequence we need to
1139   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1141   // writes or CAS operations and ii) do not occur through any other
1142   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1144   // sequences to the desired machine code sequences. Selection of the
1145   // alternative rules can be implemented by predicates which identify
1146   // the relevant node sequences.
1147   //
1148   // The ideal graph generator translates a volatile read to the node
1149   // sequence
1150   //
1151   //   LoadX[mo_acquire]
1152   //   MemBarAcquire
1153   //
1154   // As a special case when using the compressed oops optimization we
1155   // may also see this variant
1156   //
1157   //   LoadN[mo_acquire]
1158   //   DecodeN
1159   //   MemBarAcquire
1160   //
1161   // A volatile write is translated to the node sequence
1162   //
1163   //   MemBarRelease
1164   //   StoreX[mo_release] {CardMark}-optional
1165   //   MemBarVolatile
1166   //
1167   // n.b. the above node patterns are generated with a strict
1168   // 'signature' configuration of input and output dependencies (see
1169   // the predicates below for exact details). The card mark may be as
1170   // simple as a few extra nodes or, in a few GC configurations, may
1171   // include more complex control flow between the leading and
1172   // trailing memory barriers. However, whatever the card mark
1173   // configuration these signatures are unique to translated volatile
1174   // reads/stores -- they will not appear as a result of any other
1175   // bytecode translation or inlining nor as a consequence of
1176   // optimizing transforms.
1177   //
1178   // We also want to catch inlined unsafe volatile gets and puts and
1179   // be able to implement them using either ldar<x>/stlr<x> or some
1180   // combination of ldr<x>/stlr<x> and dmb instructions.
1181   //
1182   // Inlined unsafe volatiles puts manifest as a minor variant of the
1183   // normal volatile put node sequence containing an extra cpuorder
1184   // membar
1185   //
1186   //   MemBarRelease
1187   //   MemBarCPUOrder
1188   //   StoreX[mo_release] {CardMark}-optional
1189   //   MemBarVolatile
1190   //
1191   // n.b. as an aside, the cpuorder membar is not itself subject to
1192   // matching and translation by adlc rules.  However, the rule
1193   // predicates need to detect its presence in order to correctly
1194   // select the desired adlc rules.
1195   //
1196   // Inlined unsafe volatile gets manifest as a somewhat different
1197   // node sequence to a normal volatile get
1198   //
1199   //   MemBarCPUOrder
1200   //        ||       \\
1201   //   MemBarAcquire LoadX[mo_acquire]
1202   //        ||
1203   //   MemBarCPUOrder
1204   //
1205   // In this case the acquire membar does not directly depend on the
1206   // load. However, we can be sure that the load is generated from an
1207   // inlined unsafe volatile get if we see it dependent on this unique
1208   // sequence of membar nodes. Similarly, given an acquire membar we
1209   // can know that it was added because of an inlined unsafe volatile
1210   // get if it is fed and feeds a cpuorder membar and if its feed
1211   // membar also feeds an acquiring load.
1212   //
1213   // Finally an inlined (Unsafe) CAS operation is translated to the
1214   // following ideal graph
1215   //
1216   //   MemBarRelease
1217   //   MemBarCPUOrder
1218   //   CompareAndSwapX {CardMark}-optional
1219   //   MemBarCPUOrder
1220   //   MemBarAcquire
1221   //
1222   // So, where we can identify these volatile read and write
1223   // signatures we can choose to plant either of the above two code
1224   // sequences. For a volatile read we can simply plant a normal
1225   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1226   // also choose to inhibit translation of the MemBarAcquire and
1227   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1228   //
1229   // When we recognise a volatile store signature we can choose to
1230   // plant at a dmb ish as a translation for the MemBarRelease, a
1231   // normal str<x> and then a dmb ish for the MemBarVolatile.
1232   // Alternatively, we can inhibit translation of the MemBarRelease
1233   // and MemBarVolatile and instead plant a simple stlr<x>
1234   // instruction.
1235   //
1236   // when we recognise a CAS signature we can choose to plant a dmb
1237   // ish as a translation for the MemBarRelease, the conventional
1238   // macro-instruction sequence for the CompareAndSwap node (which
1239   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1240   // Alternatively, we can elide generation of the dmb instructions
1241   // and plant the alternative CompareAndSwap macro-instruction
1242   // sequence (which uses ldaxr<x>).
1243   //
1244   // Of course, the above only applies when we see these signature
1245   // configurations. We still want to plant dmb instructions in any
1246   // other cases where we may see a MemBarAcquire, MemBarRelease or
1247   // MemBarVolatile. For example, at the end of a constructor which
1248   // writes final/volatile fields we will see a MemBarRelease
1249   // instruction and this needs a 'dmb ish' lest we risk the
1250   // constructed object being visible without making the
1251   // final/volatile field writes visible.
1252   //
1253   // n.b. the translation rules below which rely on detection of the
1254   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1255   // If we see anything other than the signature configurations we
1256   // always just translate the loads and stores to ldr<x> and str<x>
1257   // and translate acquire, release and volatile membars to the
1258   // relevant dmb instructions.
1259   //
1260 
1261   // graph traversal helpers used for volatile put/get and CAS
1262   // optimization
1263 
1264   // 1) general purpose helpers
1265 
1266   // if node n is linked to a parent MemBarNode by an intervening
1267   // Control and Memory ProjNode return the MemBarNode otherwise return
1268   // NULL.
1269   //
1270   // n may only be a Load or a MemBar.
1271 
1272   MemBarNode *parent_membar(const Node *n)
1273   {
1274     Node *ctl = NULL;
1275     Node *mem = NULL;
1276     Node *membar = NULL;
1277 
1278     if (n->is_Load()) {
1279       ctl = n->lookup(LoadNode::Control);
1280       mem = n->lookup(LoadNode::Memory);
1281     } else if (n->is_MemBar()) {
1282       ctl = n->lookup(TypeFunc::Control);
1283       mem = n->lookup(TypeFunc::Memory);
1284     } else {
1285         return NULL;
1286     }
1287 
1288     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1289       return NULL;
1290     }
1291 
1292     membar = ctl->lookup(0);
1293 
1294     if (!membar || !membar->is_MemBar()) {
1295       return NULL;
1296     }
1297 
1298     if (mem->lookup(0) != membar) {
1299       return NULL;
1300     }
1301 
1302     return membar->as_MemBar();
1303   }
1304 
1305   // if n is linked to a child MemBarNode by intervening Control and
1306   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1307 
1308   MemBarNode *child_membar(const MemBarNode *n)
1309   {
1310     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1311     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1312 
1313     // MemBar needs to have both a Ctl and Mem projection
1314     if (! ctl || ! mem)
1315       return NULL;
1316 
1317     MemBarNode *child = NULL;
1318     Node *x;
1319 
1320     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1321       x = ctl->fast_out(i);
1322       // if we see a membar we keep hold of it. we may also see a new
1323       // arena copy of the original but it will appear later
1324       if (x->is_MemBar()) {
1325           child = x->as_MemBar();
1326           break;
1327       }
1328     }
1329 
1330     if (child == NULL) {
1331       return NULL;
1332     }
1333 
1334     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1335       x = mem->fast_out(i);
1336       // if we see a membar we keep hold of it. we may also see a new
1337       // arena copy of the original but it will appear later
1338       if (x == child) {
1339         return child;
1340       }
1341     }
1342     return NULL;
1343   }
1344 
1345   // helper predicate use to filter candidates for a leading memory
1346   // barrier
1347   //
1348   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1349   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1350 
1351   bool leading_membar(const MemBarNode *barrier)
1352   {
1353     int opcode = barrier->Opcode();
1354     // if this is a release membar we are ok
1355     if (opcode == Op_MemBarRelease) {
1356       return true;
1357     }
1358     // if its a cpuorder membar . . .
1359     if (opcode != Op_MemBarCPUOrder) {
1360       return false;
1361     }
1362     // then the parent has to be a release membar
1363     MemBarNode *parent = parent_membar(barrier);
1364     if (!parent) {
1365       return false;
1366     }
1367     opcode = parent->Opcode();
1368     return opcode == Op_MemBarRelease;
1369   }
1370 
1371   // 2) card mark detection helper
1372 
1373   // helper predicate which can be used to detect a volatile membar
1374   // introduced as part of a conditional card mark sequence either by
1375   // G1 or by CMS when UseCondCardMark is true.
1376   //
1377   // membar can be definitively determined to be part of a card mark
1378   // sequence if and only if all the following hold
1379   //
1380   // i) it is a MemBarVolatile
1381   //
1382   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1383   // true
1384   //
1385   // iii) the node's Mem projection feeds a StoreCM node.
1386 
1387   bool is_card_mark_membar(const MemBarNode *barrier)
1388   {
1389     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1390       return false;
1391     }
1392 
1393     if (barrier->Opcode() != Op_MemBarVolatile) {
1394       return false;
1395     }
1396 
1397     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1398 
1399     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1400       Node *y = mem->fast_out(i);
1401       if (y->Opcode() == Op_StoreCM) {
1402         return true;
1403       }
1404     }
1405 
1406     return false;
1407   }
1408 
1409 
1410   // 3) helper predicates to traverse volatile put or CAS graphs which
1411   // may contain GC barrier subgraphs
1412 
1413   // Preamble
1414   // --------
1415   //
1416   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1418   // leading MemBarRelease and a trailing MemBarVolatile as follows
1419   //
1420   //   MemBarRelease
1421   //  {      ||      } -- optional
1422   //  {MemBarCPUOrder}
1423   //         ||     \\
1424   //         ||     StoreX[mo_release]
1425   //         | \     /
1426   //         | MergeMem
1427   //         | /
1428   //   MemBarVolatile
1429   //
1430   // where
1431   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1432   //  | \ and / indicate further routing of the Ctl and Mem feeds
1433   //
1434   // this is the graph we see for non-object stores. however, for a
1435   // volatile Object store (StoreN/P) we may see other nodes below the
1436   // leading membar because of the need for a GC pre- or post-write
1437   // barrier.
1438   //
  // with most GC configurations we will see this simple variant which
1440   // includes a post-write barrier card mark.
1441   //
1442   //   MemBarRelease______________________________
1443   //         ||    \\               Ctl \        \\
1444   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1445   //         | \     /                       . . .  /
1446   //         | MergeMem
1447   //         | /
1448   //         ||      /
1449   //   MemBarVolatile
1450   //
1451   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1452   // the object address to an int used to compute the card offset) and
1453   // Ctl+Mem to a StoreB node (which does the actual card mark).
1454   //
1455   // n.b. a StoreCM node will only appear in this configuration when
1456   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1457   // because it implies a requirement to order visibility of the card
1458   // mark (StoreCM) relative to the object put (StoreP/N) using a
1459   // StoreStore memory barrier (arguably this ought to be represented
1460   // explicitly in the ideal graph but that is not how it works). This
1461   // ordering is required for both non-volatile and volatile
1462   // puts. Normally that means we need to translate a StoreCM using
1463   // the sequence
1464   //
1465   //   dmb ishst
1466   //   stlrb
1467   //
1468   // However, in the case of a volatile put if we can recognise this
1469   // configuration and plant an stlr for the object write then we can
1470   // omit the dmb and just plant an strb since visibility of the stlr
1471   // is ordered before visibility of subsequent stores. StoreCM nodes
1472   // also arise when using G1 or using CMS with conditional card
1473   // marking. In these cases (as we shall see) we don't need to insert
1474   // the dmb when translating StoreCM because there is already an
1475   // intervening StoreLoad barrier between it and the StoreP/N.
1476   //
1477   // It is also possible to perform the card mark conditionally on it
1478   // currently being unmarked in which case the volatile put graph
1479   // will look slightly different
1480   //
1481   //   MemBarRelease____________________________________________
1482   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1483   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1484   //         | \     /                              \            |
1485   //         | MergeMem                            . . .      StoreB
1486   //         | /                                                /
1487   //         ||     /
1488   //   MemBarVolatile
1489   //
1490   // It is worth noting at this stage that both the above
1491   // configurations can be uniquely identified by checking that the
1492   // memory flow includes the following subgraph:
1493   //
1494   //   MemBarRelease
1495   //  {MemBarCPUOrder}
1496   //          |  \      . . .
1497   //          |  StoreX[mo_release]  . . .
1498   //          |   /
1499   //         MergeMem
1500   //          |
1501   //   MemBarVolatile
1502   //
1503   // This is referred to as a *normal* subgraph. It can easily be
1504   // detected starting from any candidate MemBarRelease,
1505   // StoreX[mo_release] or MemBarVolatile.
1506   //
1507   // A simple variation on this normal case occurs for an unsafe CAS
1508   // operation. The basic graph for a non-object CAS is
1509   //
1510   //   MemBarRelease
1511   //         ||
1512   //   MemBarCPUOrder
1513   //         ||     \\   . . .
1514   //         ||     CompareAndSwapX
1515   //         ||       |
1516   //         ||     SCMemProj
1517   //         | \     /
1518   //         | MergeMem
1519   //         | /
1520   //   MemBarCPUOrder
1521   //         ||
1522   //   MemBarAcquire
1523   //
1524   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1527   // tail of the graph is a pair comprising a MemBarCPUOrder +
1528   // MemBarAcquire.
1529   //
1530   // So, in the case of a CAS the normal graph has the variant form
1531   //
1532   //   MemBarRelease
1533   //   MemBarCPUOrder
1534   //          |   \      . . .
1535   //          |  CompareAndSwapX  . . .
1536   //          |    |
1537   //          |   SCMemProj
1538   //          |   /  . . .
1539   //         MergeMem
1540   //          |
1541   //   MemBarCPUOrder
1542   //   MemBarAcquire
1543   //
1544   // This graph can also easily be detected starting from any
1545   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1546   //
1547   // the code below uses two helper predicates, leading_to_normal and
1548   // normal_to_leading to identify these normal graphs, one validating
1549   // the layout starting from the top membar and searching down and
1550   // the other validating the layout starting from the lower membar
1551   // and searching up.
1552   //
1553   // There are two special case GC configurations when a normal graph
1554   // may not be generated: when using G1 (which always employs a
1555   // conditional card mark); and when using CMS with conditional card
1556   // marking configured. These GCs are both concurrent rather than
  // stop-the-world GCs. So they introduce extra Ctl+Mem flow into the
1558   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1560   // object put and the corresponding conditional card mark. CMS
1561   // employs a post-write GC barrier while G1 employs both a pre- and
1562   // post-write GC barrier. Of course the extra nodes may be absent --
1563   // they are only inserted for object puts. This significantly
1564   // complicates the task of identifying whether a MemBarRelease,
1565   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1566   // when using these GC configurations (see below). It adds similar
1567   // complexity to the task of identifying whether a MemBarRelease,
1568   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1569   //
1570   // In both cases the post-write subtree includes an auxiliary
1571   // MemBarVolatile (StoreLoad barrier) separating the object put and
1572   // the read of the corresponding card. This poses two additional
1573   // problems.
1574   //
1575   // Firstly, a card mark MemBarVolatile needs to be distinguished
1576   // from a normal trailing MemBarVolatile. Resolving this first
1577   // problem is straightforward: a card mark MemBarVolatile always
1578   // projects a Mem feed to a StoreCM node and that is a unique marker
1579   //
1580   //      MemBarVolatile (card mark)
1581   //       C |    \     . . .
1582   //         |   StoreCM   . . .
1583   //       . . .
1584   //
1585   // The second problem is how the code generator is to translate the
1586   // card mark barrier? It always needs to be translated to a "dmb
1587   // ish" instruction whether or not it occurs as part of a volatile
1588   // put. A StoreLoad barrier is needed after the object put to ensure
1589   // i) visibility to GC threads of the object put and ii) visibility
1590   // to the mutator thread of any card clearing write by a GC
1591   // thread. Clearly a normal store (str) will not guarantee this
1592   // ordering but neither will a releasing store (stlr). The latter
1593   // guarantees that the object put is visible but does not guarantee
1594   // that writes by other threads have also been observed.
1595   //
1596   // So, returning to the task of translating the object put and the
1597   // leading/trailing membar nodes: what do the non-normal node graph
1598   // look like for these 2 special cases? and how can we determine the
1599   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1600   // in both normal and non-normal cases?
1601   //
1602   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1604   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1605   // intervening StoreLoad barrier (MemBarVolatile).
1606   //
1607   // So, with CMS we may see a node graph for a volatile object store
1608   // which looks like this
1609   //
1610   //   MemBarRelease
1611   //   MemBarCPUOrder_(leading)__________________
1612   //     C |    M \       \\                   C \
1613   //       |       \    StoreN/P[mo_release]  CastP2X
1614   //       |    Bot \    /
1615   //       |       MergeMem
1616   //       |         /
1617   //      MemBarVolatile (card mark)
1618   //     C |  ||    M |
1619   //       | LoadB    |
1620   //       |   |      |
1621   //       | Cmp      |\
1622   //       | /        | \
1623   //       If         |  \
1624   //       | \        |   \
1625   // IfFalse  IfTrue  |    \
1626   //       \     / \  |     \
1627   //        \   / StoreCM    |
1628   //         \ /      |      |
1629   //        Region   . . .   |
1630   //          | \           /
1631   //          |  . . .  \  / Bot
1632   //          |       MergeMem
1633   //          |          |
1634   //        MemBarVolatile (trailing)
1635   //
1636   // The first MergeMem merges the AliasIdxBot Mem slice from the
1637   // leading membar and the oopptr Mem slice from the Store into the
1638   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1639   // Mem slice from the card mark membar and the AliasIdxRaw slice
1640   // from the StoreCM into the trailing membar (n.b. the latter
1641   // proceeds via a Phi associated with the If region).
1642   //
1643   // The graph for a CAS varies slightly, the obvious difference being
1644   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1645   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1646   // MemBarAcquire pair. The other important difference is that the
1647   // CompareAndSwap node's SCMemProj is not merged into the card mark
1648   // membar - it still feeds the trailing MergeMem. This also means
1649   // that the card mark membar receives its Mem feed directly from the
1650   // leading membar rather than via a MergeMem.
1651   //
1652   //   MemBarRelease
1653   //   MemBarCPUOrder__(leading)_________________________
1654   //       ||                       \\                 C \
1655   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1656   //     C |  ||    M |              |
1657   //       | LoadB    |       ______/|
1658   //       |   |      |      /       |
1659   //       | Cmp      |     /      SCMemProj
1660   //       | /        |    /         |
1661   //       If         |   /         /
1662   //       | \        |  /         /
1663   // IfFalse  IfTrue  | /         /
1664   //       \     / \  |/ prec    /
1665   //        \   / StoreCM       /
1666   //         \ /      |        /
1667   //        Region   . . .    /
1668   //          | \            /
1669   //          |  . . .  \   / Bot
1670   //          |       MergeMem
1671   //          |          |
1672   //        MemBarCPUOrder
1673   //        MemBarAcquire (trailing)
1674   //
1675   // This has a slightly different memory subgraph to the one seen
1676   // previously but the core of it is the same as for the CAS normal
  // subgraph
1678   //
1679   //   MemBarRelease
1680   //   MemBarCPUOrder____
1681   //      ||             \      . . .
1682   //   MemBarVolatile  CompareAndSwapX  . . .
1683   //      |  \            |
1684   //        . . .   SCMemProj
1685   //          |     /  . . .
1686   //         MergeMem
1687   //          |
1688   //   MemBarCPUOrder
1689   //   MemBarAcquire
1690   //
1691   //
1692   // G1 is quite a lot more complicated. The nodes inserted on behalf
1693   // of G1 may comprise: a pre-write graph which adds the old value to
1694   // the SATB queue; the releasing store itself; and, finally, a
1695   // post-write graph which performs a card mark.
1696   //
1697   // The pre-write graph may be omitted, but only when the put is
1698   // writing to a newly allocated (young gen) object and then only if
1699   // there is a direct memory chain to the Initialize node for the
1700   // object allocation. This will not happen for a volatile put since
1701   // any memory chain passes through the leading membar.
1702   //
1703   // The pre-write graph includes a series of 3 If tests. The outermost
1704   // If tests whether SATB is enabled (no else case). The next If tests
1705   // whether the old value is non-NULL (no else case). The third tests
1706   // whether the SATB queue index is > 0, if so updating the queue. The
1707   // else case for this third If calls out to the runtime to allocate a
1708   // new queue buffer.
1709   //
1710   // So with G1 the pre-write and releasing store subgraph looks like
1711   // this (the nested Ifs are omitted).
1712   //
1713   //  MemBarRelease (leading)____________
1714   //     C |  ||  M \   M \    M \  M \ . . .
1715   //       | LoadB   \  LoadL  LoadN   \
1716   //       | /        \                 \
1717   //       If         |\                 \
1718   //       | \        | \                 \
1719   //  IfFalse  IfTrue |  \                 \
1720   //       |     |    |   \                 |
1721   //       |     If   |   /\                |
1722   //       |     |          \               |
1723   //       |                 \              |
1724   //       |    . . .         \             |
1725   //       | /       | /       |            |
1726   //      Region  Phi[M]       |            |
1727   //       | \       |         |            |
1728   //       |  \_____ | ___     |            |
1729   //     C | C \     |   C \ M |            |
1730   //       | CastP2X | StoreN/P[mo_release] |
1731   //       |         |         |            |
1732   //     C |       M |       M |          M |
1733   //        \        |         |           /
1734   //                  . . .
1735   //          (post write subtree elided)
1736   //                    . . .
1737   //             C \         M /
1738   //         MemBarVolatile (trailing)
1739   //
1740   // n.b. the LoadB in this subgraph is not the card read -- it's a
1741   // read of the SATB queue active flag.
1742   //
1743   // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
1745   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1746   //
1747   // The G1 post-write subtree is also optional, this time when the
1748   // new value being written is either null or can be identified as a
1749   // newly allocated (young gen) object with no intervening control
1750   // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged directly into the
1753   // trailing membar as per the normal subgraph. So, the only special
1754   // case which arises is when the post-write subgraph is generated.
1755   //
1756   // The kernel of the post-write G1 subgraph is the card mark itself
1757   // which includes a card mark memory barrier (MemBarVolatile), a
1758   // card test (LoadB), and a conditional update (If feeding a
1759   // StoreCM). These nodes are surrounded by a series of nested Ifs
1760   // which try to avoid doing the card mark. The top level If skips if
1761   // the object reference does not cross regions (i.e. it tests if
1762   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1763   // need not be recorded. The next If, which skips on a NULL value,
1764   // may be absent (it is not generated if the type of value is >=
1765   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1766   // checking if card_val != young).  n.b. although this test requires
1767   // a pre-read of the card it can safely be done before the StoreLoad
1768   // barrier. However that does not bypass the need to reread the card
1769   // after the barrier.
1770   //
1771   //                (pre-write subtree elided)
1772   //        . . .                  . . .    . . .  . . .
1773   //        C |                    M |     M |    M |
1774   //       Region                  Phi[M] StoreN    |
1775   //          |                     / \      |      |
1776   //         / \_______            /   \     |      |
1777   //      C / C \      . . .            \    |      |
1778   //       If   CastP2X . . .            |   |      |
1779   //       / \                           |   |      |
1780   //      /   \                          |   |      |
1781   // IfFalse IfTrue                      |   |      |
1782   //   |       |                         |   |     /|
1783   //   |       If                        |   |    / |
1784   //   |      / \                        |   |   /  |
1785   //   |     /   \                        \  |  /   |
1786   //   | IfFalse IfTrue                   MergeMem  |
1787   //   |  . . .    / \                       /      |
1788   //   |          /   \                     /       |
1789   //   |     IfFalse IfTrue                /        |
1790   //   |      . . .    |                  /         |
1791   //   |               If                /          |
1792   //   |               / \              /           |
1793   //   |              /   \            /            |
1794   //   |         IfFalse IfTrue       /             |
1795   //   |           . . .   |         /              |
1796   //   |                    \       /               |
1797   //   |                     \     /                |
1798   //   |             MemBarVolatile__(card mark)    |
1799   //   |                ||   C |  M \  M \          |
1800   //   |               LoadB   If    |    |         |
1801   //   |                      / \    |    |         |
1802   //   |                     . . .   |    |         |
1803   //   |                          \  |    |        /
1804   //   |                        StoreCM   |       /
1805   //   |                          . . .   |      /
1806   //   |                        _________/      /
1807   //   |                       /  _____________/
1808   //   |   . . .       . . .  |  /            /
1809   //   |    |                 | /   _________/
1810   //   |    |               Phi[M] /        /
1811   //   |    |                 |   /        /
1812   //   |    |                 |  /        /
1813   //   |  Region  . . .     Phi[M]  _____/
1814   //   |    /                 |    /
1815   //   |                      |   /
1816   //   | . . .   . . .        |  /
1817   //   | /                    | /
1818   // Region           |  |  Phi[M]
1819   //   |              |  |  / Bot
1820   //    \            MergeMem
1821   //     \            /
1822   //     MemBarVolatile
1823   //
1824   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1825   // from the leading membar and the oopptr Mem slice from the Store
1826   // into the card mark membar i.e. the memory flow to the card mark
1827   // membar still looks like a normal graph.
1828   //
1829   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1830   // Mem slices (from the StoreCM and other card mark queue stores).
1831   // However in this case the AliasIdxBot Mem slice does not come
1832   // direct from the card mark membar. It is merged through a series
1833   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1834   // from the leading membar with the Mem feed from the card mark
1835   // membar. Each Phi corresponds to one of the Ifs which may skip
1836   // around the card mark membar. So when the If implementing the NULL
1837   // value check has been elided the total number of Phis is 2
1838   // otherwise it is 3.
1839   //
1840   // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
1844   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1845   // Mem feed from the CompareAndSwapP/N includes a precedence
1846   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1847   // trailing membar. So, as before the configuration includes the
1848   // normal CAS graph as a subgraph of the memory flow.
1849   //
1850   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1852   // its child membar, either a volatile put graph (including a
1853   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1854   // When that child is not a card mark membar then it marks the end
1855   // of the volatile put or CAS subgraph. If the child is a card mark
1856   // membar then the normal subgraph will form part of a volatile put
1857   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1858   // to a trailing barrier via a MergeMem. That feed is either direct
1859   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1860   // memory flow (for G1).
1861   //
1862   // The predicates controlling generation of instructions for store
1863   // and barrier nodes employ a few simple helper functions (described
1864   // below) which identify the presence or absence of all these
1865   // subgraph configurations and provide a means of traversing from
1866   // one node in the subgraph to another.
1867 
1868   // is_CAS(int opcode)
1869   //
1870   // return true if opcode is one of the possible CompareAndSwapX
1871   // values otherwise false.
1872 
1873   bool is_CAS(int opcode)
1874   {
1875     return (opcode == Op_CompareAndSwapI ||
1876             opcode == Op_CompareAndSwapL ||
1877             opcode == Op_CompareAndSwapN ||
1878             opcode == Op_CompareAndSwapP);
1879   }
1880 
1881   // leading_to_normal
1882   //
  // graph traversal helper which detects the normal case Mem feed from
1884   // a release membar (or, optionally, its cpuorder child) to a
1885   // dependent volatile membar i.e. it ensures that one or other of
1886   // the following Mem flow subgraph is present.
1887   //
1888   //   MemBarRelease
1889   //   MemBarCPUOrder {leading}
1890   //          |  \      . . .
1891   //          |  StoreN/P[mo_release]  . . .
1892   //          |   /
1893   //         MergeMem
1894   //          |
1895   //   MemBarVolatile {trailing or card mark}
1896   //
1897   //   MemBarRelease
1898   //   MemBarCPUOrder {leading}
1899   //      |       \      . . .
1900   //      |     CompareAndSwapX  . . .
1901   //               |
1902   //     . . .    SCMemProj
1903   //           \   |
1904   //      |    MergeMem
1905   //      |       /
1906   //    MemBarCPUOrder
1907   //    MemBarAcquire {trailing}
1908   //
1909   // if the correct configuration is present returns the trailing
1910   // membar otherwise NULL.
1911   //
1912   // the input membar is expected to be either a cpuorder membar or a
1913   // release membar. in the latter case it should not have a cpu membar
1914   // child.
1915   //
1916   // the returned value may be a card mark or trailing membar
1917   //
1918 
  // walk down from a leading (release/cpuorder) membar looking for the
  // normal volatile-put or CAS Mem flow; returns the membar at the
  // bottom of that flow -- a card mark or trailing membar -- or NULL
  // when the expected configuration is absent.
  MemBarNode *leading_to_normal(MemBarNode *leading)
  {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow: the leading membar must publish a Memory proj
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    if (!mem) {
      return NULL;
    }

    Node *x = NULL;
    StoreNode * st = NULL;      // releasing store, if present
    LoadStoreNode *cas = NULL;  // CompareAndSwapX, if present
    MergeMemNode *mm = NULL;    // merge fed from the membar's Mem proj

    // scan the users of the Memory proj for the MergeMem and for
    // exactly one releasing store or CAS (StoreCM is not a candidate)
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        if (mm != NULL) {
          return NULL;
        }
        // two merge mems is one too many
        mm = x->as_MergeMem();
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas) {
      return NULL;
    }

    // must have a merge if we also have st
    // (in the CAS case the merge hangs off the SCMemProj and is
    // located further down)
    if (st && !mm) {
      return NULL;
    }

    Node *y = NULL;
    if (cas) {
      // look for an SCMemProj hanging off the CAS
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->is_Proj()) {
          y = x;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      // the proj must feed a MergeMem
      for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
        x = y->fast_out(i);
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm == NULL)
        return NULL;
    } else {
      // ensure the store feeds the existing mergemem;
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          y = st;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
    }

    MemBarNode *mbar = NULL;
    // ensure the merge feeds the expected type of membar: a
    // MemBarVolatile below a store, or a MemBarCPUOrder + MemBarAcquire
    // pair below a CAS
    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
      x = mm->fast_out(i);
      if (x->is_MemBar()) {
        int opcode = x->Opcode();
        if (opcode == Op_MemBarVolatile && st) {
          mbar = x->as_MemBar();
        } else if (cas && opcode == Op_MemBarCPUOrder) {
          // NOTE(review): this inner 'y' deliberately shadows the
          // outer Node *y above -- confirm before renaming
          MemBarNode *y =  x->as_MemBar();
          y = child_membar(y);
          if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
            mbar = y;
          }
        }
        break;
      }
    }

    return mbar;
  }
2026 
2027   // normal_to_leading
2028   //
2029   // graph traversal helper which detects the normal case Mem feed
2030   // from either a card mark or a trailing membar to a preceding
2031   // release membar (optionally its cpuorder child) i.e. it ensures
2032   // that one or other of the following Mem flow subgraphs is present.
2033   //
2034   //   MemBarRelease
2035   //   MemBarCPUOrder {leading}
2036   //          |  \      . . .
2037   //          |  StoreN/P[mo_release]  . . .
2038   //          |   /
2039   //         MergeMem
2040   //          |
2041   //   MemBarVolatile {card mark or trailing}
2042   //
2043   //   MemBarRelease
2044   //   MemBarCPUOrder {leading}
2045   //      |       \      . . .
2046   //      |     CompareAndSwapX  . . .
2047   //               |
2048   //     . . .    SCMemProj
2049   //           \   |
2050   //      |    MergeMem
2051   //      |        /
2052   //    MemBarCPUOrder
2053   //    MemBarAcquire {trailing}
2054   //
2055   // this predicate checks for the same flow as the previous predicate
2056   // but starting from the bottom rather than the top.
2057   //
  // if the configuration is present returns the cpuorder membar for
2059   // preference or when absent the release membar otherwise NULL.
2060   //
2061   // n.b. the input membar is expected to be a MemBarVolatile but
2062   // need not be a card mark membar.
2063 
2064   MemBarNode *normal_to_leading(const MemBarNode *barrier)
2065   {
2066     // input must be a volatile membar
2067     assert((barrier->Opcode() == Op_MemBarVolatile ||
2068             barrier->Opcode() == Op_MemBarAcquire),
2069            "expecting a volatile or an acquire membar");
2070     Node *x;
2071     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2072 
2073     // if we have an acquire membar then it must be fed via a CPUOrder
2074     // membar
2075 
2076     if (is_cas) {
2077       // skip to parent barrier which must be a cpuorder
2078       x = parent_membar(barrier);
2079       if (x->Opcode() != Op_MemBarCPUOrder)
2080         return NULL;
2081     } else {
2082       // start from the supplied barrier
2083       x = (Node *)barrier;
2084     }
2085 
2086     // the Mem feed to the membar should be a merge
2087     x = x ->in(TypeFunc::Memory);
2088     if (!x->is_MergeMem())
2089       return NULL;
2090 
2091     MergeMemNode *mm = x->as_MergeMem();
2092 
2093     if (is_cas) {
2094       // the merge should be fed from the CAS via an SCMemProj node
2095       x = NULL;
2096       for (uint idx = 1; idx < mm->req(); idx++) {
2097         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2098           x = mm->in(idx);
2099           break;
2100         }
2101       }
2102       if (x == NULL) {
2103         return NULL;
2104       }
2105       // check for a CAS feeding this proj
2106       x = x->in(0);
2107       int opcode = x->Opcode();
2108       if (!is_CAS(opcode)) {
2109         return NULL;
2110       }
2111       // the CAS should get its mem feed from the leading membar
2112       x = x->in(MemNode::Memory);
2113     } else {
2114       // the merge should get its Bottom mem feed from the leading membar
2115       x = mm->in(Compile::AliasIdxBot);
2116     }
2117 
2118     // ensure this is a non control projection
2119     if (!x->is_Proj() || x->is_CFG()) {
2120       return NULL;
2121     }
2122     // if it is fed by a membar that's the one we want
2123     x = x->in(0);
2124 
2125     if (!x->is_MemBar()) {
2126       return NULL;
2127     }
2128 
2129     MemBarNode *leading = x->as_MemBar();
2130     // reject invalid candidates
2131     if (!leading_membar(leading)) {
2132       return NULL;
2133     }
2134 
2135     // ok, we have a leading membar, now for the sanity clauses
2136 
2137     // the leading membar must feed Mem to a releasing store or CAS
2138     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2139     StoreNode *st = NULL;
2140     LoadStoreNode *cas = NULL;
2141     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2142       x = mem->fast_out(i);
2143       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2144         // two stores or CASes is one too many
2145         if (st != NULL || cas != NULL) {
2146           return NULL;
2147         }
2148         st = x->as_Store();
2149       } else if (is_CAS(x->Opcode())) {
2150         if (st != NULL || cas != NULL) {
2151           return NULL;
2152         }
2153         cas = x->as_LoadStore();
2154       }
2155     }
2156 
2157     // we should not have both a store and a cas
2158     if (st == NULL & cas == NULL) {
2159       return NULL;
2160     }
2161 
2162     if (st == NULL) {
2163       // nothing more to check
2164       return leading;
2165     } else {
2166       // we should not have a store if we started from an acquire
2167       if (is_cas) {
2168         return NULL;
2169       }
2170 
2171       // the store should feed the merge we used to get here
2172       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2173         if (st->fast_out(i) == mm) {
2174           return leading;
2175         }
2176       }
2177     }
2178 
2179     return NULL;
2180   }
2181 
2182   // card_mark_to_trailing
2183   //
2184   // graph traversal helper which detects extra, non-normal Mem feed
2185   // from a card mark volatile membar to a trailing membar i.e. it
2186   // ensures that one of the following three GC post-write Mem flow
2187   // subgraphs is present.
2188   //
2189   // 1)
2190   //     . . .
2191   //       |
2192   //   MemBarVolatile (card mark)
2193   //      |          |
2194   //      |        StoreCM
2195   //      |          |
2196   //      |        . . .
2197   //  Bot |  /
2198   //   MergeMem
2199   //      |
2200   //      |
2201   //    MemBarVolatile {trailing}
2202   //
2203   // 2)
2204   //   MemBarRelease/CPUOrder (leading)
2205   //    |
2206   //    |
2207   //    |\       . . .
2208   //    | \        |
2209   //    |  \  MemBarVolatile (card mark)
2210   //    |   \   |     |
2211   //     \   \  |   StoreCM    . . .
2212   //      \   \ |
2213   //       \  Phi
2214   //        \ /
2215   //        Phi  . . .
2216   //     Bot |   /
2217   //       MergeMem
2218   //         |
2219   //    MemBarVolatile {trailing}
2220   //
2221   //
2222   // 3)
2223   //   MemBarRelease/CPUOrder (leading)
2224   //    |
2225   //    |\
2226   //    | \
2227   //    |  \      . . .
2228   //    |   \       |
2229   //    |\   \  MemBarVolatile (card mark)
2230   //    | \   \   |     |
2231   //    |  \   \  |   StoreCM    . . .
2232   //    |   \   \ |
2233   //     \   \  Phi
2234   //      \   \ /
2235   //       \  Phi
2236   //        \ /
2237   //        Phi  . . .
2238   //     Bot |   /
2239   //       MergeMem
2240   //         |
2241   //         |
2242   //    MemBarVolatile {trailing}
2243   //
2244   // configuration 1 is only valid if UseConcMarkSweepGC &&
2245   // UseCondCardMark
2246   //
2247   // configurations 2 and 3 are only valid if UseG1GC.
2248   //
2249   // if a valid configuration is present returns the trailing membar
2250   // otherwise NULL.
2251   //
2252   // n.b. the supplied membar is expected to be a card mark
2253   // MemBarVolatile i.e. the caller must ensure the input node has the
2254   // correct operand and feeds Mem to a StoreCM node
2255 
2256   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2257   {
2258     // input must be a card mark volatile membar
2259     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2260 
2261     Node *feed = barrier->proj_out(TypeFunc::Memory);
2262     Node *x;
2263     MergeMemNode *mm = NULL;
2264 
2265     const int MAX_PHIS = 3;     // max phis we will search through
2266     int phicount = 0;           // current search count
2267 
2268     bool retry_feed = true;
2269     while (retry_feed) {
2270       // see if we have a direct MergeMem feed
2271       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2272         x = feed->fast_out(i);
2273         // the correct Phi will be merging a Bot memory slice
2274         if (x->is_MergeMem()) {
2275           mm = x->as_MergeMem();
2276           break;
2277         }
2278       }
2279       if (mm) {
2280         retry_feed = false;
2281       } else if (UseG1GC & phicount++ < MAX_PHIS) {
2282         // the barrier may feed indirectly via one or two Phi nodes
2283         PhiNode *phi = NULL;
2284         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2285           x = feed->fast_out(i);
2286           // the correct Phi will be merging a Bot memory slice
2287           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2288             phi = x->as_Phi();
2289             break;
2290           }
2291         }
2292         if (!phi) {
2293           return NULL;
2294         }
2295         // look for another merge below this phi
2296         feed = phi;
2297       } else {
2298         // couldn't find a merge
2299         return NULL;
2300       }
2301     }
2302 
2303     // sanity check this feed turns up as the expected slice
2304     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2305 
2306     MemBarNode *trailing = NULL;
2307     // be sure we have a trailing membar the merge
2308     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2309       x = mm->fast_out(i);
2310       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2311         trailing = x->as_MemBar();
2312         break;
2313       }
2314     }
2315 
2316     return trailing;
2317   }
2318 
2319   // trailing_to_card_mark
2320   //
2321   // graph traversal helper which detects extra, non-normal Mem feed
2322   // from a trailing volatile membar to a preceding card mark volatile
2323   // membar i.e. it identifies whether one of the three possible extra
2324   // GC post-write Mem flow subgraphs is present
2325   //
2326   // this predicate checks for the same flow as the previous predicate
2327   // but starting from the bottom rather than the top.
2328   //
2329   // if the configuration is present returns the card mark membar
2330   // otherwise NULL
2331   //
2332   // n.b. the supplied membar is expected to be a trailing
2333   // MemBarVolatile i.e. the caller must ensure the input node has the
2334   // correct opcode
2335 
  // walk up from a (non card mark) trailing MemBarVolatile through the
  // post-write Mem flow -- a MergeMem plus, for G1, up to MAX_PHIS
  // Bottom-slice Phi nodes -- to the card mark membar; returns the
  // card mark membar or NULL if the flow is not present.
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile,
           "expecting a volatile membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    // the Mem feed to the membar should be a merge
    Node *x = trailing->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    // follow the merge's Bottom slice upwards
    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // if the Bottom slice is already a Proj we can skip the Phi search
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // each Phi merges a feed from the leading membar with either
        // the card mark membar's Mem proj or a further Phi
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there,
        // otherwise we must see a leading membar and a further phi or
        // this is the wrong configuration
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2412 
2413   // trailing_to_leading
2414   //
2415   // graph traversal helper which checks the Mem flow up the graph
2416   // from a (non-card mark) trailing membar attempting to locate and
2417   // return an associated leading membar. it first looks for a
2418   // subgraph in the normal configuration (relying on helper
2419   // normal_to_leading). failing that it then looks for one of the
2420   // possible post-write card mark subgraphs linking the trailing node
2421   // to a the card mark membar (relying on helper
2422   // trailing_to_card_mark), and then checks that the card mark membar
2423   // is fed by a leading membar (once again relying on auxiliary
2424   // predicate normal_to_leading).
2425   //
  // if the configuration is valid returns the cpuorder membar for
2427   // preference or when absent the release membar otherwise NULL.
2428   //
2429   // n.b. the input membar is expected to be either a volatile or
2430   // acquire membar but in the former case must *not* be a card mark
2431   // membar.
2432 
2433   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2434   {
2435     assert((trailing->Opcode() == Op_MemBarAcquire ||
2436             trailing->Opcode() == Op_MemBarVolatile),
2437            "expecting an acquire or volatile membar");
2438     assert((trailing->Opcode() != Op_MemBarVolatile ||
2439             !is_card_mark_membar(trailing)),
2440            "not expecting a card mark membar");
2441 
2442     MemBarNode *leading = normal_to_leading(trailing);
2443 
2444     if (leading) {
2445       return leading;
2446     }
2447 
2448     // nothing more to do if this is an acquire
2449     if (trailing->Opcode() == Op_MemBarAcquire) {
2450       return NULL;
2451     }
2452 
2453     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2454 
2455     if (!card_mark_membar) {
2456       return NULL;
2457     }
2458 
2459     return normal_to_leading(card_mark_membar);
2460   }
2461 
2462   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2463 
// predicate controlling elision of the dmb normally planted for a
// volatile load: returns true when the supplied MemBarAcquire belongs
// to one of the recognized acquire graphs (bytecode volatile read,
// inlined unsafe volatile get, or trailing membar of a CAS) and can
// therefore be dropped in favour of an ldar<x>
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on it's preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS
  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2571 
2572 bool needs_acquiring_load(const Node *n)
2573 {
2574   assert(n->is_Load(), "expecting a load");
2575   if (UseBarriersForVolatile) {
2576     // we use a normal load and a dmb
2577     return false;
2578   }
2579 
2580   LoadNode *ld = n->as_Load();
2581 
2582   if (!ld->is_acquire()) {
2583     return false;
2584   }
2585 
2586   // check if this load is feeding an acquire membar
2587   //
2588   //   LoadX[mo_acquire]
2589   //   {  |1   }
2590   //   {DecodeN}
2591   //      |Parms
2592   //   MemBarAcquire*
2593   //
2594   // where * tags node we were passed
2595   // and |k means input k
2596 
2597   Node *start = ld;
2598   Node *mbacq = NULL;
2599 
2600   // if we hit a DecodeNarrowPtr we reset the start node and restart
2601   // the search through the outputs
2602  restart:
2603 
2604   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2605     Node *x = start->fast_out(i);
2606     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2607       mbacq = x;
2608     } else if (!mbacq &&
2609                (x->is_DecodeNarrowPtr() ||
2610                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2611       start = x;
2612       goto restart;
2613     }
2614   }
2615 
2616   if (mbacq) {
2617     return true;
2618   }
2619 
2620   // now check for an unsafe volatile get
2621 
2622   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2623   //
2624   //     MemBarCPUOrder
2625   //        ||       \\
2626   //   MemBarAcquire* LoadX[mo_acquire]
2627   //        ||
2628   //   MemBarCPUOrder
2629 
2630   MemBarNode *membar;
2631 
2632   membar = parent_membar(ld);
2633 
2634   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2635     return false;
2636   }
2637 
2638   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2639 
2640   membar = child_membar(membar);
2641 
2642   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2643     return false;
2644   }
2645 
2646   membar = child_membar(membar);
2647 
2648   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2649     return false;
2650   }
2651 
2652   return true;
2653 }
2654 
2655 bool unnecessary_release(const Node *n)
2656 {
2657   assert((n->is_MemBar() &&
2658           n->Opcode() == Op_MemBarRelease),
2659          "expecting a release membar");
2660 
2661   if (UseBarriersForVolatile) {
2662     // we need to plant a dmb
2663     return false;
2664   }
2665 
2666   // if there is a dependent CPUOrder barrier then use that as the
2667   // leading
2668 
2669   MemBarNode *barrier = n->as_MemBar();
2670   // check for an intervening cpuorder membar
2671   MemBarNode *b = child_membar(barrier);
2672   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2673     // ok, so start the check from the dependent cpuorder barrier
2674     barrier = b;
2675   }
2676 
2677   // must start with a normal feed
2678   MemBarNode *child_barrier = leading_to_normal(barrier);
2679 
2680   if (!child_barrier) {
2681     return false;
2682   }
2683 
2684   if (!is_card_mark_membar(child_barrier)) {
2685     // this is the trailing membar and we are done
2686     return true;
2687   }
2688 
2689   // must be sure this card mark feeds a trailing membar
2690   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2691   return (trailing != NULL);
2692 }
2693 
2694 bool unnecessary_volatile(const Node *n)
2695 {
2696   // assert n->is_MemBar();
2697   if (UseBarriersForVolatile) {
2698     // we need to plant a dmb
2699     return false;
2700   }
2701 
2702   MemBarNode *mbvol = n->as_MemBar();
2703 
2704   // first we check if this is part of a card mark. if so then we have
2705   // to generate a StoreLoad barrier
2706 
2707   if (is_card_mark_membar(mbvol)) {
2708       return false;
2709   }
2710 
2711   // ok, if it's not a card mark then we still need to check if it is
2712   // a trailing membar of a volatile put hgraph.
2713 
2714   return (trailing_to_leading(mbvol) != NULL);
2715 }
2716 
2717 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2718 
2719 bool needs_releasing_store(const Node *n)
2720 {
2721   // assert n->is_Store();
2722   if (UseBarriersForVolatile) {
2723     // we use a normal store and dmb combination
2724     return false;
2725   }
2726 
2727   StoreNode *st = n->as_Store();
2728 
2729   // the store must be marked as releasing
2730   if (!st->is_release()) {
2731     return false;
2732   }
2733 
2734   // the store must be fed by a membar
2735 
2736   Node *x = st->lookup(StoreNode::Memory);
2737 
2738   if (! x || !x->is_Proj()) {
2739     return false;
2740   }
2741 
2742   ProjNode *proj = x->as_Proj();
2743 
2744   x = proj->lookup(0);
2745 
2746   if (!x || !x->is_MemBar()) {
2747     return false;
2748   }
2749 
2750   MemBarNode *barrier = x->as_MemBar();
2751 
2752   // if the barrier is a release membar or a cpuorder mmebar fed by a
2753   // release membar then we need to check whether that forms part of a
2754   // volatile put graph.
2755 
2756   // reject invalid candidates
2757   if (!leading_membar(barrier)) {
2758     return false;
2759   }
2760 
2761   // does this lead a normal subgraph?
2762   MemBarNode *mbvol = leading_to_normal(barrier);
2763 
2764   if (!mbvol) {
2765     return false;
2766   }
2767 
2768   // all done unless this is a card mark
2769   if (!is_card_mark_membar(mbvol)) {
2770     return true;
2771   }
2772 
2773   // we found a card mark -- just make sure we have a trailing barrier
2774 
2775   return (card_mark_to_trailing(mbvol) != NULL);
2776 }
2777 
2778 // predicate controlling translation of CAS
2779 //
2780 // returns true if CAS needs to use an acquiring load otherwise false
2781 
// Always true when ldar is in use: every CAS is translated with an
// acquiring load-exclusive.  The ASSERT-only body merely validates the
// expected graph shape around an inlined unsafe CAS.
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // explicit dmb barriers are planted instead
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2826 
// predicate controlling translation of StoreCM
//
// returns true if the StoreStore barrier normally emitted before the
// card write can be omitted, otherwise false
2831 
// Returns true when the dmb ishst before this StoreCM can be elided.
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then the
  // object put is emitted as a plain str so we must insert the dmb
  // ishst

  if (UseBarriersForVolatile) {
    return false;
  }

  // we can omit the dmb ishst if this StoreCM is part of a volatile
  // put because in that case the put will be implemented by stlr
  //
  // we need to check for a normal subgraph feeding this StoreCM.
  // that means the StoreCM must be fed Memory from a leading membar,
  // either a MemBarRelease or its dependent MemBarCPUOrder, and the
  // leading membar must be part of a normal subgraph

  Node *x = storecm->in(StoreNode::Memory);

  // the memory feed must come via a projection ...
  if (!x->is_Proj()) {
    return false;
  }

  x = x->in(0);

  // ... from a membar
  if (!x->is_MemBar()) {
    return false;
  }

  MemBarNode *leading = x->as_MemBar();

  // reject invalid candidates
  if (!leading_membar(leading)) {
    return false;
  }

  // we can omit the StoreStore if it is the head of a normal subgraph
  return (leading_to_normal(leading) != NULL);
}
2881 
2882 
2883 #define __ _masm.
2884 
// forward declarations for helper functions to convert register
// indices to register objects
2887 
2888 // the ad file has to provide implementations of certain methods
2889 // expected by the generic code
2890 //
2891 // REQUIRED FUNCTIONALITY
2892 
2893 //=============================================================================
2894 
2895 // !!!!! Special hack to get all types of calls to specify the byte offset
2896 //       from the start of the call to the point where the return address
2897 //       will point.
2898 
2899 int MachCallStaticJavaNode::ret_addr_offset()
2900 {
2901   // call should be a simple bl
2902   int off = 4;
2903   return off;
2904 }
2905 
2906 int MachCallDynamicJavaNode::ret_addr_offset()
2907 {
2908   return 16; // movz, movk, movk, bl
2909 }
2910 
2911 int MachCallRuntimeNode::ret_addr_offset() {
2912   // for generated stubs the call will be
2913   //   far_call(addr)
2914   // for real runtime callouts it will be six instructions
2915   // see aarch64_enc_java_to_runtime
2916   //   adr(rscratch2, retaddr)
2917   //   lea(rscratch1, RuntimeAddress(addr)
2918   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2919   //   blrt rscratch1
2920   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2921   if (cb) {
2922     return MacroAssembler::far_branch_size();
2923   } else {
2924     return 6 * NativeInstruction::instruction_size;
2925   }
2926 }
2927 
2928 // Indicate if the safepoint node needs the polling page as an input
2929 
2930 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2932 // instruction itself. so we cannot plant a mov of the safepoint poll
2933 // address followed by a load. setting this to true means the mov is
2934 // scheduled as a prior instruction. that's better for scheduling
2935 // anyway.
2936 
2937 bool SafePointNode::needs_polling_address_input()
2938 {
2939   return true;
2940 }
2941 
2942 //=============================================================================
2943 
2944 #ifndef PRODUCT
2945 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2946   st->print("BREAKPOINT");
2947 }
2948 #endif
2949 
2950 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2951   MacroAssembler _masm(&cbuf);
2952   __ brk(0);
2953 }
2954 
2955 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
2956   return MachNode::size(ra_);
2957 }
2958 
2959 //=============================================================================
2960 
2961 #ifndef PRODUCT
2962   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2963     st->print("nop \t# %d bytes pad for loops and calls", _count);
2964   }
2965 #endif
2966 
2967   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2968     MacroAssembler _masm(&cbuf);
2969     for (int i = 0; i < _count; i++) {
2970       __ nop();
2971     }
2972   }
2973 
2974   uint MachNopNode::size(PhaseRegAlloc*) const {
2975     return _count * NativeInstruction::instruction_size;
2976   }
2977 
2978 //=============================================================================
2979 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2980 
2981 int Compile::ConstantTable::calculate_table_base_offset() const {
2982   return 0;  // absolute addressing, no offset
2983 }
2984 
2985 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
2986 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
2987   ShouldNotReachHere();
2988 }
2989 
2990 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
2991   // Empty encoding
2992 }
2993 
2994 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
2995   return 0;
2996 }
2997 
2998 #ifndef PRODUCT
2999 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
3000   st->print("-- \t// MachConstantBaseNode (empty encoding)");
3001 }
3002 #endif
3003 
3004 #ifndef PRODUCT
3005 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3006   Compile* C = ra_->C;
3007 
3008   int framesize = C->frame_slots() << LogBytesPerInt;
3009 
3010   if (C->need_stack_bang(framesize))
3011     st->print("# stack bang size=%d\n\t", framesize);
3012 
3013   if (framesize < ((1 << 9) + 2 * wordSize)) {
3014     st->print("sub  sp, sp, #%d\n\t", framesize);
3015     st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
3016     if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
3017   } else {
3018     st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
3019     if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
3020     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
3021     st->print("sub  sp, sp, rscratch1");
3022   }
3023 }
3024 #endif
3025 
// Emit the method prolog: patchable nop, optional stack bang, frame
// build, simulator notification, and constant-table base setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack before building the frame when required
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // tell the simulator we have entered the method
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3061 
3062 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
3063 {
3064   return MachNode::size(ra_); // too many variables; just compute it
3065                               // the hard way
3066 }
3067 
3068 int MachPrologNode::reloc() const
3069 {
3070   return 0;
3071 }
3072 
3073 //=============================================================================
3074 
3075 #ifndef PRODUCT
3076 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3077   Compile* C = ra_->C;
3078   int framesize = C->frame_slots() << LogBytesPerInt;
3079 
3080   st->print("# pop frame %d\n\t",framesize);
3081 
3082   if (framesize == 0) {
3083     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
3084   } else if (framesize < ((1 << 9) + 2 * wordSize)) {
3085     st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
3086     st->print("add  sp, sp, #%d\n\t", framesize);
3087   } else {
3088     st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
3089     st->print("add  sp, sp, rscratch1\n\t");
3090     st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
3091   }
3092 
3093   if (do_polling() && C->is_method_compilation()) {
3094     st->print("# touch polling page\n\t");
3095     st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
3096     st->print("ldr zr, [rscratch1]");
3097   }
3098 }
3099 #endif
3100 
// Emit the method epilog: frame teardown, simulator notification and
// the return-point safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  // tell the simulator we are re-entering the caller
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  // touch the polling page on method return so a safepoint can be taken
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3116 
3117 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
3118   // Variable size. Determine dynamically.
3119   return MachNode::size(ra_);
3120 }
3121 
3122 int MachEpilogNode::reloc() const {
3123   // Return number of relocatable values contained in this instruction.
3124   return 1; // 1 for polling page.
3125 }
3126 
3127 const Pipeline * MachEpilogNode::pipeline() const {
3128   return MachNode::pipeline_class();
3129 }
3130 
3131 // This method seems to be obsolete. It is declared in machnode.hpp
3132 // and defined in all *.ad files, but it is never called. Should we
3133 // get rid of it?
3134 int MachEpilogNode::safepoint_offset() const {
3135   assert(do_polling(), "no return for this epilog node");
3136   return 4;
3137 }
3138 
3139 //=============================================================================
3140 
3141 // Figure out which register class each belongs in: rc_int, rc_float or
3142 // rc_stack.
3143 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3144 
3145 static enum RC rc_class(OptoReg::Name reg) {
3146 
3147   if (reg == OptoReg::Bad) {
3148     return rc_bad;
3149   }
3150 
3151   // we have 30 int registers * 2 halves
3152   // (rscratch1 and rscratch2 are omitted)
3153 
3154   if (reg < 60) {
3155     return rc_int;
3156   }
3157 
3158   // we have 32 float register * 2 halves
3159   if (reg < 60 + 128) {
3160     return rc_float;
3161   }
3162 
3163   // Between float regs & stack is the flags regs.
3164   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3165 
3166   return rc_stack;
3167 }
3168 
3169 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3170   Compile* C = ra_->C;
3171 
3172   // Get registers to move.
3173   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3174   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3175   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3176   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3177 
3178   enum RC src_hi_rc = rc_class(src_hi);
3179   enum RC src_lo_rc = rc_class(src_lo);
3180   enum RC dst_hi_rc = rc_class(dst_hi);
3181   enum RC dst_lo_rc = rc_class(dst_lo);
3182 
3183   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3184 
3185   if (src_hi != OptoReg::Bad) {
3186     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3187            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3188            "expected aligned-adjacent pairs");
3189   }
3190 
3191   if (src_lo == dst_lo && src_hi == dst_hi) {
3192     return 0;            // Self copy, no move.
3193   }
3194 
3195   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3196               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3197   int src_offset = ra_->reg2offset(src_lo);
3198   int dst_offset = ra_->reg2offset(dst_lo);
3199 
3200   if (bottom_type()->isa_vect() != NULL) {
3201     uint ireg = ideal_reg();
3202     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3203     if (cbuf) {
3204       MacroAssembler _masm(cbuf);
3205       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3206       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3207         // stack->stack
3208         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3209         if (ireg == Op_VecD) {
3210           __ unspill(rscratch1, true, src_offset);
3211           __ spill(rscratch1, true, dst_offset);
3212         } else {
3213           __ spill_copy128(src_offset, dst_offset);
3214         }
3215       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3216         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3217                ireg == Op_VecD ? __ T8B : __ T16B,
3218                as_FloatRegister(Matcher::_regEncode[src_lo]));
3219       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3220         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3221                        ireg == Op_VecD ? __ D : __ Q,
3222                        ra_->reg2offset(dst_lo));
3223       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3224         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3225                        ireg == Op_VecD ? __ D : __ Q,
3226                        ra_->reg2offset(src_lo));
3227       } else {
3228         ShouldNotReachHere();
3229       }
3230     }
3231   } else if (cbuf) {
3232     MacroAssembler _masm(cbuf);
3233     switch (src_lo_rc) {
3234     case rc_int:
3235       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3236         if (is64) {
3237             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3238                    as_Register(Matcher::_regEncode[src_lo]));
3239         } else {
3240             MacroAssembler _masm(cbuf);
3241             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3242                     as_Register(Matcher::_regEncode[src_lo]));
3243         }
3244       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3245         if (is64) {
3246             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3247                      as_Register(Matcher::_regEncode[src_lo]));
3248         } else {
3249             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3250                      as_Register(Matcher::_regEncode[src_lo]));
3251         }
3252       } else {                    // gpr --> stack spill
3253         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3254         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3255       }
3256       break;
3257     case rc_float:
3258       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3259         if (is64) {
3260             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3261                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3262         } else {
3263             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3264                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3265         }
3266       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3267           if (cbuf) {
3268             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3269                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3270         } else {
3271             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3272                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3273         }
3274       } else {                    // fpr --> stack spill
3275         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3276         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3277                  is64 ? __ D : __ S, dst_offset);
3278       }
3279       break;
3280     case rc_stack:
3281       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3282         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3283       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3284         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3285                    is64 ? __ D : __ S, src_offset);
3286       } else {                    // stack --> stack copy
3287         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3288         __ unspill(rscratch1, is64, src_offset);
3289         __ spill(rscratch1, is64, dst_offset);
3290       }
3291       break;
3292     default:
3293       assert(false, "bad rc_class for spill");
3294       ShouldNotReachHere();
3295     }
3296   }
3297 
3298   if (st) {
3299     st->print("spill ");
3300     if (src_lo_rc == rc_stack) {
3301       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3302     } else {
3303       st->print("%s -> ", Matcher::regName[src_lo]);
3304     }
3305     if (dst_lo_rc == rc_stack) {
3306       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3307     } else {
3308       st->print("%s", Matcher::regName[dst_lo]);
3309     }
3310     if (bottom_type()->isa_vect() != NULL) {
3311       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3312     } else {
3313       st->print("\t# spill size = %d", is64 ? 64:32);
3314     }
3315   }
3316 
3317   return 0;
3318 
3319 }
3320 
3321 #ifndef PRODUCT
3322 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3323   if (!ra_)
3324     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3325   else
3326     implementation(NULL, ra_, false, st);
3327 }
3328 #endif
3329 
3330 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3331   implementation(&cbuf, ra_, false, NULL);
3332 }
3333 
3334 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
3335   return MachNode::size(ra_);
3336 }
3337 
3338 //=============================================================================
3339 
3340 #ifndef PRODUCT
3341 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3342   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3343   int reg = ra_->get_reg_first(this);
3344   st->print("add %s, rsp, #%d]\t# box lock",
3345             Matcher::regName[reg], offset);
3346 }
3347 #endif
3348 
3349 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3350   MacroAssembler _masm(&cbuf);
3351 
3352   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3353   int reg    = ra_->get_encode(this);
3354 
3355   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3356     __ add(as_Register(reg), sp, offset);
3357   } else {
3358     ShouldNotReachHere();
3359   }
3360 }
3361 
3362 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
3363   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
3364   return 4;
3365 }
3366 
3367 //=============================================================================
3368 
3369 #ifndef PRODUCT
3370 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3371 {
3372   st->print_cr("# MachUEPNode");
3373   if (UseCompressedClassPointers) {
3374     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3375     if (Universe::narrow_klass_shift() != 0) {
3376       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3377     }
3378   } else {
3379    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3380   }
3381   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3382   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3383 }
3384 #endif
3385 
3386 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
3387 {
3388   // This is the unverified entry point.
3389   MacroAssembler _masm(&cbuf);
3390 
3391   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
3392   Label skip;
3393   // TODO
3394   // can we avoid this skip and still use a reloc?
3395   __ br(Assembler::EQ, skip);
3396   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
3397   __ bind(skip);
3398 }
3399 
3400 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
3401 {
3402   return MachNode::size(ra_);
3403 }
3404 
3405 // REQUIRED EMIT CODE
3406 
3407 //=============================================================================
3408 
3409 // Emit exception handler code.
3410 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3411 {
3412   // mov rscratch1 #exception_blob_entry_point
3413   // br rscratch1
3414   // Note that the code buffer's insts_mark is always relative to insts.
3415   // That's why we must use the macroassembler to generate a handler.
3416   MacroAssembler _masm(&cbuf);
3417   address base = __ start_a_stub(size_exception_handler());
3418   if (base == NULL) {
3419     ciEnv::current()->record_failure("CodeCache is full");
3420     return 0;  // CodeBuffer::expand failed
3421   }
3422   int offset = __ offset();
3423   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3424   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3425   __ end_a_stub();
3426   return offset;
3427 }
3428 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the current pc before jumping to the unpack blob.
  // NOTE(review): presumably the unpack code uses lr to identify the
  // deopt site in this nmethod -- confirm against SharedRuntime::deopt_blob.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The handler must fit in the space reserved by size_deopt_handler().
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3449 
3450 // REQUIRED MATCHER CODE
3451 
3452 //=============================================================================
3453 
3454 const bool Matcher::match_rule_supported(int opcode) {
3455 
3456   // TODO
3457   // identify extra cases that we might want to provide match rules for
3458   // e.g. Op_StrEquals and other intrinsics
3459   if (!has_match_rule(opcode)) {
3460     return false;
3461   }
3462 
3463   return true;  // Per default match rules are supported.
3464 }
3465 
3466 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3467 
3468   // TODO
3469   // identify extra cases that we might want to provide match rules for
3470   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3471   bool ret_value = match_rule_supported(opcode);
3472   // Add rules here.
3473 
3474   return ret_value;  // Per default match rules are supported.
3475 }
3476 
// Register-pressure threshold for FP registers; AArch64 keeps the
// shared default.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// Map a register number to an FPU stack offset.  Only meaningful for
// x87-style stacked FP registers; AArch64 has flat FP registers, so
// this is never reached (Unimplemented() aborts if it is).
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}

// Can a branch of the given rule/size reach the given offset in short
// form?  NOTE(review): apparently unused on AArch64 -- Unimplemented()
// would abort if this were ever called.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
{
  Unimplemented();
  return false;
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3503 
3504 // Vector width in bytes.
3505 const int Matcher::vector_width_in_bytes(BasicType bt) {
3506   int size = MIN2(16,(int)MaxVectorSize);
3507   // Minimum 2 values in vector
3508   if (size < 2*type2aelembytes(bt)) size = 0;
3509   // But never < 4
3510   if (size < 4) size = 0;
3511   return size;
3512 }
3513 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count: full vector width divided by element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3518 const int Matcher::min_vector_size(const BasicType bt) {
3519 //  For the moment limit the vector size to 8 bytes
3520     int size = 8 / type2aelembytes(bt);
3521     if (size < 2) size = 2;
3522     return size;
3523 }
3524 
3525 // Vector ideal reg.
3526 const int Matcher::vector_ideal_reg(int len) {
3527   switch(len) {
3528     case  8: return Op_VecD;
3529     case 16: return Op_VecX;
3530   }
3531   ShouldNotReachHere();
3532   return 0;
3533 }
3534 
// Shift counts are always held in a full 128-bit vector register,
// regardless of the operand size.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}

// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// AArch64 supports misaligned vector store/load (the stale "x86"
// wording of the old comment notwithstanding), unless the user forces
// alignment with -XX:+AlignVector.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3548 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
// NOTE(review): value is in bytes (18 longs).
const int Matcher::init_array_short_size = 18 * BytesPerLong;

// Use conditional move (CMOVL)
// Cost 0 => long cmoves may be used freely.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

// Cost 0 => float cmoves may be used freely.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 hardware uses only the low bits of the count, so no explicit
// masking is needed.
const bool Matcher::need_masked_shift_count = false;
3577 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // A narrow oop can appear directly inside an address expression only
  // when decoding it requires no shift.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3597 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not used on AArch64 (the old "No-op on amd64" comment was stale):
// Unimplemented() aborts if this is ever reached.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3629 
3630 // Return whether or not this register is ever used as an argument.
3631 // This function is used on startup to build the trampoline stubs in
3632 // generateOptoStub.  Registers not mentioned will be killed by the VM
3633 // call in the trampoline, and arguments in those registers not be
3634 // available to the callee.
3635 bool Matcher::can_be_java_arg(int reg)
3636 {
3637   return
3638     reg ==  R0_num || reg == R0_H_num ||
3639     reg ==  R1_num || reg == R1_H_num ||
3640     reg ==  R2_num || reg == R2_H_num ||
3641     reg ==  R3_num || reg == R3_H_num ||
3642     reg ==  R4_num || reg == R4_H_num ||
3643     reg ==  R5_num || reg == R5_H_num ||
3644     reg ==  R6_num || reg == R6_H_num ||
3645     reg ==  R7_num || reg == R7_H_num ||
3646     reg ==  V0_num || reg == V0_H_num ||
3647     reg ==  V1_num || reg == V1_H_num ||
3648     reg ==  V2_num || reg == V2_H_num ||
3649     reg ==  V3_num || reg == V3_H_num ||
3650     reg ==  V4_num || reg == V4_H_num ||
3651     reg ==  V5_num || reg == V5_H_num ||
3652     reg ==  V6_num || reg == V6_H_num ||
3653     reg ==  V7_num || reg == V7_H_num;
3654 }
3655 
// A Java argument register (see can_be_java_arg) may also hold
// spilled values.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Never use an inline hand-coded sequence for long division by a
// constant on AArch64.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3664 
// Register for DIVI projection of divmodI.
// Combined div/mod nodes are not matched on AArch64, so none of these
// projection masks should ever be requested (ShouldNotReachHere).
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Mask of the register used to preserve SP across a MethodHandle
// invoke: the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3691 
3692 // helper for encoding java_to_runtime calls on sim
3693 //
3694 // this is needed to compute the extra arguments required when
3695 // planting a call to the simulator blrt instruction. the TypeFunc
3696 // can be queried to identify the counts for integral, and floating
3697 // arguments and the return type
3698 
3699 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3700 {
3701   int gps = 0;
3702   int fps = 0;
3703   const TypeTuple *domain = tf->domain();
3704   int max = domain->cnt();
3705   for (int i = TypeFunc::Parms; i < max; i++) {
3706     const Type *t = domain->field_at(i);
3707     switch(t->basic_type()) {
3708     case T_FLOAT:
3709     case T_DOUBLE:
3710       fps++;
3711     default:
3712       gps++;
3713     }
3714   }
3715   gpcnt = gps;
3716   fpcnt = fps;
3717   BasicType rt = tf->return_type();
3718   switch (rt) {
3719   case T_VOID:
3720     rtype = MacroAssembler::ret_type_void;
3721     break;
3722   default:
3723     rtype = MacroAssembler::ret_type_integral;
3724     break;
3725   case T_FLOAT:
3726     rtype = MacroAssembler::ret_type_float;
3727     break;
3728   case T_DOUBLE:
3729     rtype = MacroAssembler::ret_type_double;
3730     break;
3731   }
3732 }
3733 
// Emit a volatile access (INSN: one of the ldar*/stlr* forms) on REG.
// Volatile accesses support only a plain base register, so the macro
// guarantees the memory operand has no index, scale or displacement.
// SCRATCH is accepted for interface uniformity but is not used here.
// NOTE: _masm is deliberately declared OUTSIDE the braces so that
// subsequent `__` statements in the same enc_class (e.g. the
// sign-extension after ldarb) can reuse it.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore helpers below:
// scalar GP access, scalar FP access, and SIMD/vector access.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3747 
3748   // Used for all non-volatile memory accesses.  The use of
3749   // $mem->opcode() to discover whether this pattern uses sign-extended
3750   // offsets is something of a kludge.
3751   static void loadStore(MacroAssembler masm, mem_insn insn,
3752                          Register reg, int opcode,
3753                          Register base, int index, int size, int disp)
3754   {
3755     Address::extend scale;
3756 
3757     // Hooboy, this is fugly.  We need a way to communicate to the
3758     // encoder that the index needs to be sign extended, so we have to
3759     // enumerate all the cases.
3760     switch (opcode) {
3761     case INDINDEXSCALEDOFFSETI2L:
3762     case INDINDEXSCALEDI2L:
3763     case INDINDEXSCALEDOFFSETI2LN:
3764     case INDINDEXSCALEDI2LN:
3765     case INDINDEXOFFSETI2L:
3766     case INDINDEXOFFSETI2LN:
3767       scale = Address::sxtw(size);
3768       break;
3769     default:
3770       scale = Address::lsl(size);
3771     }
3772 
3773     if (index == -1) {
3774       (masm.*insn)(reg, Address(base, disp));
3775     } else {
3776       if (disp == 0) {
3777         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3778       } else {
3779         masm.lea(rscratch1, Address(base, disp));
3780         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3781       }
3782     }
3783   }
3784 
3785   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3786                          FloatRegister reg, int opcode,
3787                          Register base, int index, int size, int disp)
3788   {
3789     Address::extend scale;
3790 
3791     switch (opcode) {
3792     case INDINDEXSCALEDOFFSETI2L:
3793     case INDINDEXSCALEDI2L:
3794     case INDINDEXSCALEDOFFSETI2LN:
3795     case INDINDEXSCALEDI2LN:
3796       scale = Address::sxtw(size);
3797       break;
3798     default:
3799       scale = Address::lsl(size);
3800     }
3801 
3802      if (index == -1) {
3803       (masm.*insn)(reg, Address(base, disp));
3804     } else {
3805       if (disp == 0) {
3806         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3807       } else {
3808         masm.lea(rscratch1, Address(base, disp));
3809         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3810       }
3811     }
3812   }
3813 
3814   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3815                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3816                          int opcode, Register base, int index, int size, int disp)
3817   {
3818     if (index == -1) {
3819       (masm.*insn)(reg, T, Address(base, disp));
3820     } else {
3821       assert(disp == 0, "unsupported address mode");
3822       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3823     }
3824   }
3825 
3826 %}
3827 
3828 
3829 
3830 //----------ENCODING BLOCK-----------------------------------------------------
3831 // This block specifies the encoding classes used by the compiler to
3832 // output byte streams.  Encoding classes are parameterized macros
3833 // used by Machine Instruction Nodes in order to generate the bit
3834 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, &
3837 // COND_INTER.  REG_INTER causes an operand to generate a function
3838 // which returns its register number when queried.  CONST_INTER causes
3839 // an operand to generate a function which returns the value of the
3840 // constant when queried.  MEMORY_INTER causes an operand to generate
3841 // four functions which return the Base Register, the Index Register,
3842 // the Scale Value, and the Offset Value of the operand when queried.
3843 // COND_INTER causes an operand to generate six functions which return
3844 // the encoding code (ie - encoding bits for the instruction)
3845 // associated with each basic boolean condition for a conditional
3846 // instruction.
3847 //
3848 // Instructions specify two basic values for encoding.  Again, a
3849 // function is available to check if the constant displacement is an
3850 // oop. They use the ins_encode keyword to specify their encoding
3851 // classes (which must be a sequence of enc_class names, and their
3852 // parameters, specified in the encoding block), and they use the
3853 // opcode keyword to specify, in order, their primary, secondary, and
3854 // tertiary opcode.  Only the opcode sections which a particular
3855 // instruction needs for encoding need to be specified.
3856 encode %{
3857   // Build emit functions for each basic byte or larger field in the
3858   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3859   // from C++ code in the enc_class source block.  Emit functions will
3860   // live in the main source block for now.  In future, we can
3861   // generalize this by adding a syntax that specifies the sizes of
3862   // fields in an order, so that the adlc can build the emit functions
3863   // automagically
3864 
3865   // catch all for unimplemented encodings
3866   enc_class enc_unimplemented %{
3867     MacroAssembler _masm(&cbuf);
3868     __ unimplemented("C2 catch all");
3869   %}
3870 
  // BEGIN Non-volatile memory access
  //
  // All of these encodings delegate to the loadStore helpers defined
  // in the source block, passing $mem->opcode() so the helper can pick
  // the correct index-extension mode for the matched addressing form.

  // load signed byte, sign-extended to int
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed byte with 64-bit sign extension (ldrsb)
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load unsigned (zero-extended) byte into int
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load unsigned byte into long
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed halfword into int (ldrshw)
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load signed halfword with 64-bit sign extension (ldrsh)
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load unsigned halfword into int
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load unsigned halfword into long
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word into int
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load zero-extended 32-bit word into long
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load sign-extended 32-bit word into long (ldrsw)
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit doubleword into long
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit float
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit double
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit vector (S variant)
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit vector (D variant)
  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 128-bit vector (Q variant)
  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3974 
  // store byte
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte (uses the zero register, no source operand needed)
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte preceded by a StoreStore barrier, so earlier
  // stores are visible before this one
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store halfword
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero halfword
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit word
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero 32-bit word
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit doubleword
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero 64-bit doubleword
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit float
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit double
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit vector (S variant)
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit vector (D variant)
  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 128-bit vector (Q variant)
  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
4069 
  // volatile loads and stores
  //
  // These use the acquire/release instructions (ldar*/stlr*) via the
  // MOV_VOLATILE macro, which only permits a plain base-register
  // addressing mode.  The ISA has no sign-extending acquire loads, so
  // the signed variants follow the load with an explicit sxt*.

  // store-release byte
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release 32-bit word
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // load-acquire byte, then sign-extend to int
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire byte, then sign-extend to long
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire unsigned byte into int
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire unsigned byte into long
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword, then sign-extend to int
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword, then sign-extend to long
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire unsigned halfword into int
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire unsigned halfword into long
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire 32-bit word into int
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 32-bit word into long
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 64-bit doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // load-acquire float: no FP acquire load exists, so load the bits
  // into rscratch1 and fmov them into the FP register
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // load-acquire double via rscratch1, as above
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // store-release 64-bit doubleword
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release float: move the bits to rscratch2 first (no FP
  // release store exists); the inner scope keeps this _masm separate
  // from the one declared by MOV_VOLATILE
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release double via rscratch2, as above
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4196 
4197   // synchronized read/update encodings
4198 
  // Load-acquire exclusive of a long.  ldaxr only accepts a bare [base]
  // address, so any displacement and/or register index in the memory
  // operand is folded into rscratch1 with lea first.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        // base + disp: compute the effective address first.
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        // base + (index << scale)
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale): needs two address steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4227 
  // Store-release exclusive of a long.  As with ldaxr above, stlxr takes
  // only a bare base register, so the effective address is formed in
  // rscratch2.  rscratch1 receives the exclusive-store status word.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // stlxr writes 0 to the status register on success, so EQ after this
    // compare means the store took effect.
    __ cmpw(rscratch1, zr);
  %}
4257 
  // 64-bit compare-and-swap at [mem.base].  The guarantee enforces that
  // the memory operand has neither index nor displacement, since cmpxchg
  // requires a bare base register.  The outcome is reported via the
  // condition flags (consumed e.g. by aarch64_enc_cset_eq).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}
4264 
  // 32-bit variant of aarch64_enc_cmpxchg: same constraints, but using
  // the word-sized ldxrw/cmpw/stlxrw primitives.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}
4271 
4272 
4273   // The only difference between aarch64_enc_cmpxchg and
4274   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4275   // CompareAndSwap sequence to serve as a barrier on acquiring a
4276   // lock.
  // 64-bit CAS with acquire semantics: identical to aarch64_enc_cmpxchg
  // except the load side uses ldaxr (load-acquire exclusive) so the CAS
  // also acts as an acquire barrier.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}
4283 
  // 32-bit CAS with acquire semantics (ldaxrw on the load side).
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}
4290 
4291 
4292   // auxiliary used for CompareAndSwapX to set result register
4293   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4294     MacroAssembler _masm(&cbuf);
4295     Register res_reg = as_Register($res$$reg);
4296     __ cset(res_reg, Assembler::EQ);
4297   %}
4298 
4299   // prefetch encodings
4300 
  // Prefetch-for-store of the memory operand.  PSTL1KEEP = prefetch for
  // store, L1 cache, normal (temporal) retention.  A combined
  // displacement + register index cannot be expressed in one prfm, so the
  // displacement part is folded into rscratch1 first.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4319 
  // Zero a word-aligned region of cnt words starting at base, using an
  // 8-way unrolled store loop entered Duff's-device style part-way
  // through, so the leftover (cnt % 8) words need no separate prologue
  // loop.  Clobbers rscratch1/rscratch2; cnt_reg and base_reg are
  // destroyed.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Branch backwards from 'entry' by one 4-byte instruction per leftover
    // word, so exactly (cnt % unroll) of the unrolled stores execute on
    // the first pass.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
4368 
  // mov encodings
4370 
4371   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4372     MacroAssembler _masm(&cbuf);
4373     u_int32_t con = (u_int32_t)$src$$constant;
4374     Register dst_reg = as_Register($dst$$reg);
4375     if (con == 0) {
4376       __ movw(dst_reg, zr);
4377     } else {
4378       __ movw(dst_reg, con);
4379     }
4380   %}
4381 
4382   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4383     MacroAssembler _masm(&cbuf);
4384     Register dst_reg = as_Register($dst$$reg);
4385     u_int64_t con = (u_int64_t)$src$$constant;
4386     if (con == 0) {
4387       __ mov(dst_reg, zr);
4388     } else {
4389       __ mov(dst_reg, con);
4390     }
4391   %}
4392 
  // Materialize a pointer constant, dispatching on its relocation type:
  // oops and metadata get relocation entries; unrelocated addresses below
  // the VM page size are emitted as raw immediates, anything else via
  // adrp + add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and 1 have dedicated encodings (aarch64_enc_mov_p0/_p1).
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4417 
4418   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4419     MacroAssembler _masm(&cbuf);
4420     Register dst_reg = as_Register($dst$$reg);
4421     __ mov(dst_reg, zr);
4422   %}
4423 
4424   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4425     MacroAssembler _masm(&cbuf);
4426     Register dst_reg = as_Register($dst$$reg);
4427     __ mov(dst_reg, (u_int64_t)1);
4428   %}
4429 
  // Load the safepoint polling page address with a single adrp carrying a
  // poll_type relocation.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    // The page is page aligned, so adrp alone must reach it exactly.
    assert(off == 0, "assumed offset == 0");
  %}
4438 
  // Load the card-table byte map base address with a single adrp; the
  // address is assumed page aligned (checked by the assert).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, ExternalAddress(page), off);
    assert(off == 0, "assumed offset == 0");
  %}
4447 
  // Materialize a narrow (compressed) oop constant with an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // Narrow NULL has its own encoding (aarch64_enc_mov_n0).
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4460 
4461   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4462     MacroAssembler _masm(&cbuf);
4463     Register dst_reg = as_Register($dst$$reg);
4464     __ mov(dst_reg, zr);
4465   %}
4466 
  // Materialize a narrow (compressed) klass constant with a metadata
  // relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4479 
4480   // arithmetic encodings
4481 
  // Shared encoding for 32-bit add/subtract of an immediate: $primary is
  // 0 for add and 1 for subtract, so a subtract becomes an add of the
  // negated constant.  A (possibly negated) negative constant is then
  // emitted as the opposite operation on its absolute value, keeping the
  // immediate in the instruction's unsigned range.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
4495 
  // 64-bit counterpart of aarch64_enc_addsubw_imm; the immLAddSub operand
  // keeps the constant within int32_t range, hence the narrowing cast.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4509 
4510   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4511     MacroAssembler _masm(&cbuf);
4512    Register dst_reg = as_Register($dst$$reg);
4513    Register src1_reg = as_Register($src1$$reg);
4514    Register src2_reg = as_Register($src2$$reg);
4515     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4516   %}
4517 
4518   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4519     MacroAssembler _masm(&cbuf);
4520    Register dst_reg = as_Register($dst$$reg);
4521    Register src1_reg = as_Register($src1$$reg);
4522    Register src2_reg = as_Register($src2$$reg);
4523     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4524   %}
4525 
4526   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4527     MacroAssembler _masm(&cbuf);
4528    Register dst_reg = as_Register($dst$$reg);
4529    Register src1_reg = as_Register($src1$$reg);
4530    Register src2_reg = as_Register($src2$$reg);
4531     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4532   %}
4533 
4534   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4535     MacroAssembler _masm(&cbuf);
4536    Register dst_reg = as_Register($dst$$reg);
4537    Register src1_reg = as_Register($src1$$reg);
4538    Register src2_reg = as_Register($src2$$reg);
4539     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4540   %}
4541 
4542   // compare instruction encodings
4543 
4544   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4545     MacroAssembler _masm(&cbuf);
4546     Register reg1 = as_Register($src1$$reg);
4547     Register reg2 = as_Register($src2$$reg);
4548     __ cmpw(reg1, reg2);
4549   %}
4550 
4551   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4552     MacroAssembler _masm(&cbuf);
4553     Register reg = as_Register($src1$$reg);
4554     int32_t val = $src2$$constant;
4555     if (val >= 0) {
4556       __ subsw(zr, reg, val);
4557     } else {
4558       __ addsw(zr, reg, -val);
4559     }
4560   %}
4561 
4562   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4563     MacroAssembler _masm(&cbuf);
4564     Register reg1 = as_Register($src1$$reg);
4565     u_int32_t val = (u_int32_t)$src2$$constant;
4566     __ movw(rscratch1, val);
4567     __ cmpw(reg1, rscratch1);
4568   %}
4569 
4570   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4571     MacroAssembler _masm(&cbuf);
4572     Register reg1 = as_Register($src1$$reg);
4573     Register reg2 = as_Register($src2$$reg);
4574     __ cmp(reg1, reg2);
4575   %}
4576 
  // Compare a long register against a 12-bit add/sub immediate.  Negative
  // values are compared by adding the negation; val == -val holds only
  // for Long.MIN_VALUE, which cannot be negated and is materialized in
  // rscratch1 via orr instead.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4591 
4592   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4593     MacroAssembler _masm(&cbuf);
4594     Register reg1 = as_Register($src1$$reg);
4595     u_int64_t val = (u_int64_t)$src2$$constant;
4596     __ mov(rscratch1, val);
4597     __ cmp(reg1, rscratch1);
4598   %}
4599 
4600   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4601     MacroAssembler _masm(&cbuf);
4602     Register reg1 = as_Register($src1$$reg);
4603     Register reg2 = as_Register($src2$$reg);
4604     __ cmp(reg1, reg2);
4605   %}
4606 
4607   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4608     MacroAssembler _masm(&cbuf);
4609     Register reg1 = as_Register($src1$$reg);
4610     Register reg2 = as_Register($src2$$reg);
4611     __ cmpw(reg1, reg2);
4612   %}
4613 
4614   enc_class aarch64_enc_testp(iRegP src) %{
4615     MacroAssembler _masm(&cbuf);
4616     Register reg = as_Register($src$$reg);
4617     __ cmp(reg, zr);
4618   %}
4619 
4620   enc_class aarch64_enc_testn(iRegN src) %{
4621     MacroAssembler _masm(&cbuf);
4622     Register reg = as_Register($src$$reg);
4623     __ cmpw(reg, zr);
4624   %}
4625 
4626   enc_class aarch64_enc_b(label lbl) %{
4627     MacroAssembler _masm(&cbuf);
4628     Label *L = $lbl$$label;
4629     __ b(*L);
4630   %}
4631 
4632   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4633     MacroAssembler _masm(&cbuf);
4634     Label *L = $lbl$$label;
4635     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4636   %}
4637 
4638   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4639     MacroAssembler _masm(&cbuf);
4640     Label *L = $lbl$$label;
4641     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4642   %}
4643 
  // Slow-path subtype check (is sub a subtype of super?).  On success
  // control falls through check_klass_subtype_slow_path (zeroing
  // result_reg when $primary is set); on failure it branches to 'miss',
  // skipping the zeroing.  set_cond_codes asks the helper to leave the
  // outcome in the flags as well.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4661 
  // Emit a static / opt-virtual / runtime call through a trampoline so
  // the target can lie beyond direct-branch range.  _method == NULL means
  // a call into the runtime; real Java targets additionally get a
  // static-call-to-interpreter stub.  A NULL return from the emitters
  // means the code cache is full, which fails the current compilation.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    if (_method) {
      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}
4689 
4690   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4691     MacroAssembler _masm(&cbuf);
4692     address call = __ ic_call((address)$meth$$method);
4693     if (call == NULL) {
4694       ciEnv::current()->record_failure("CodeCache is full");
4695       return;
4696     }
4697   %}
4698 
  // Post-call epilogue: the stack-depth verification is not implemented
  // on AArch64, so trap if the debug flag asks for it.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4706 
  // Call from compiled Java code into the runtime.  Targets inside the
  // code cache are reached with a trampoline call; native targets go
  // through blrt with the signature described by getCallInfo.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        // Code cache exhausted: abandon this compilation.
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      // Record the return address alongside a zero slot on the stack
      // before the call, and pop the pair afterwards.
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4737 
  // Jump (possibly far) to the shared exception-rethrow stub.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4742 
  // Method return: branch to the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4747 
4748   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4749     MacroAssembler _masm(&cbuf);
4750     Register target_reg = as_Register($jump_target$$reg);
4751     __ br(target_reg);
4752   %}
4753 
4754   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4755     MacroAssembler _masm(&cbuf);
4756     Register target_reg = as_Register($jump_target$$reg);
4757     // exception oop should be in r0
4758     // ret addr has been popped into lr
4759     // callee expects it in r3
4760     __ mov(r3, lr);
4761     __ br(target_reg);
4762   %}
4763 
  // Fast-path monitor enter for C2.  Tries, in order: biased locking,
  // a thin-lock CAS of the object's mark word, recursive stack-lock
  // detection, and (for already-inflated locks) a CAS of the monitor's
  // owner field.  On exit the flags encode the result: EQ = locked,
  // NE = must call the runtime.  EmitSync bits force slow paths.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // Force NE (oop is never null here) so the slow path is taken.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    {
      Label retry_load;
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // Status 0 => store succeeded; the cmp above already left EQ in
      // the flags for the consumers at 'cont'.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4905 
4906   // TODO
4907   // reimplement this with custom cmpxchgptr code
4908   // which avoids some of the unnecessary branching
  // Fast-path monitor exit.  Handles biased unlock, recursive stack
  // unlock (displaced header == 0), a thin-lock CAS restoring the saved
  // mark word, and release of an uncontended inflated monitor.  Flags on
  // exit: EQ = unlocked, NE = must call the runtime.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      // NOTE(review): this tests the monitor bit in disp_hdr (the
      // displaced header loaded from the box above) rather than in tmp
      // (the mark word just loaded) -- confirm this is intentional; if
      // the bit is never set in disp_hdr, an inflated lock would instead
      // fall through to the CAS below, fail it, and take the runtime
      // slow path via 'cont' with NE set.
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // Status 0 => store succeeded; flags are still EQ from the cmp.
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp leaves NE in the flags for the slow path taken by cbnz.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4998 
4999 %}
5000 
5001 //----------FRAME--------------------------------------------------------------
5002 // Definition of frame structure and management information.
5003 //
5004 //  S T A C K   L A Y O U T    Allocators stack-slot number
5005 //                             |   (to get allocators register number
5006 //  G  Owned by    |        |  v    add OptoReg::stack0())
5007 //  r   CALLER     |        |
5008 //  o     |        +--------+      pad to even-align allocators stack-slot
5009 //  w     V        |  pad0  |        numbers; owned by CALLER
5010 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5011 //  h     ^        |   in   |  5
5012 //        |        |  args  |  4   Holes in incoming args owned by SELF
5013 //  |     |        |        |  3
5014 //  |     |        +--------+
5015 //  V     |        | old out|      Empty on Intel, window on Sparc
5016 //        |    old |preserve|      Must be even aligned.
5017 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5018 //        |        |   in   |  3   area for Intel ret address
5019 //     Owned by    |preserve|      Empty on Sparc.
5020 //       SELF      +--------+
5021 //        |        |  pad2  |  2   pad to align old SP
5022 //        |        +--------+  1
5023 //        |        | locks  |  0
5024 //        |        +--------+----> OptoReg::stack0(), even aligned
5025 //        |        |  pad1  | 11   pad to align new SP
5026 //        |        +--------+
5027 //        |        |        | 10
5028 //        |        | spills |  9   spills
5029 //        V        |        |  8   (pad0 slot for callee)
5030 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5031 //        ^        |  out   |  7
5032 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5033 //     Owned by    +--------+
5034 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5035 //        |    new |preserve|      Must be even-aligned.
5036 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5037 //        |        |        |
5038 //
5039 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5040 //         known from SELF's arguments and the Java calling convention.
5041 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
5049 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5050 //         even aligned with pad0 as needed.
5051 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5052 //           (the latter is true on Intel but is it false on AArch64?)
5053 //         region 6-11 is even aligned; it may be padded out more so that
5054 //         the region from SP to FP meets the minimum stack alignment.
5055 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5056 //         alignment.  Region 11, pad1, may be dynamically extended so that
5057 //         SP meets the minimum alignment.
5058 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing, just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    // NOTE(review): the assert below excludes Op_RegN (which sits below
    // Op_RegI in the opcode order implied by these tables) even though
    // both tables carry an entry for it -- confirm narrow-oop returns
    // never reach here.
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-word register number for each returnable ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High-word register number; OptoReg::Bad for values that occupy
    // a single 32-bit slot.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5162 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute; declares a default
                             // value of 1 for operands that omit op_cost

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5180 
5181 //----------OPERANDS-----------------------------------------------------------
5182 // Operand definitions must precede instruction definitions for correct parsing
5183 // in the ADLC because operands constitute user defined types which are used in
5184 // instruction definitions.
5185 
5186 //----------Simple Operands----------------------------------------------------
5187 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant less than or equal to 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Specific 32 bit constants that are multiples of 8 (8..64)
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 0xff
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 0xffff
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5341 
// Constant 63.
// NOTE(review): despite the immL name this operand matches a 32 bit
// int constant (ConI via get_int()) -- presumably because shift counts
// for long shifts are ints in the ideal graph; confirm against the
// matching rules that use it.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant 255 (0xff).
// NOTE(review): same as immL_63 above -- matches ConI/get_int()
// despite the immL name; confirm this is intentional.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5361 
// 64 bit constant 0xffff
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xffffffff
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate of the form 2^k - 1 (a run of low-order ones) with
// the top two bits clear.
// NOTE(review): zero also satisfies this predicate (0 + 1 is a power
// of 2); verify the rules that match it tolerate a zero mask.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int immediate of the form 2^k - 1 with the top two bits clear.
// NOTE(review): zero also satisfies this predicate; see immL_bitmask.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5403 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long constant form
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores, long form
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5500 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Byte offset of the saved pc (frame anchor + last_Java_pc) within
// the thread structure, as a long constant.

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5587 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// NOTE(review): the original usage comment here was identical to
// immP_M1's ("write the current PC to the thread anchor") -- presumably
// a distinct sentinel value; confirm against the rules that match it.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5669 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double immediate representable in the FP move-immediate encoding
// (the previous comment, "constant 'double +0.0'", described immD0
// above, not this operand).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float immediate representable in the FP move-immediate encoding
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5730 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass pointer immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5761 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5795 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  // Explicit zero cost: without this the operand appears to inherit the
  // default op_cost(1) declared by the op_attrib section, making it look
  // more expensive to the matcher than its siblings (iRegINoSp,
  // iRegPNoSp, iRegL), which all declare op_cost(0).
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5804 
// Pointer Register Operands
// Pointer Register
// Matches the NoSp, R0 and thread register operands as alternatives.
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only (the frame pointer register class)
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5943 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5988 
5989 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6022 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand, D (64 bit) vectors
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand, X (128 bit) vectors
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6102 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags register and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6142 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6184 
6185 //----------Memory Operands----------------------------------------------------
6186 
6187 operand indirect(iRegP reg)
6188 %{
6189   constraint(ALLOC_IN_RC(ptr_reg));
6190   match(reg);
6191   op_cost(0);
6192   format %{ "[$reg]" %}
6193   interface(MEMORY_INTER) %{
6194     base($reg);
6195     index(0xffffffff);
6196     scale(0x0);
6197     disp(0x0);
6198   %}
6199 %}
6200 
6201 operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
6202 %{
6203   constraint(ALLOC_IN_RC(ptr_reg));
6204   match(AddP (AddP reg (LShiftL lreg scale)) off);
6205   op_cost(INSN_COST);
6206   format %{ "$reg, $lreg lsl($scale), $off" %}
6207   interface(MEMORY_INTER) %{
6208     base($reg);
6209     index($lreg);
6210     scale($scale);
6211     disp($off);
6212   %}
6213 %}
6214 
6215 operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
6216 %{
6217   constraint(ALLOC_IN_RC(ptr_reg));
6218   match(AddP (AddP reg (LShiftL lreg scale)) off);
6219   op_cost(INSN_COST);
6220   format %{ "$reg, $lreg lsl($scale), $off" %}
6221   interface(MEMORY_INTER) %{
6222     base($reg);
6223     index($lreg);
6224     scale($scale);
6225     disp($off);
6226   %}
6227 %}
6228 
6229 operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
6230 %{
6231   constraint(ALLOC_IN_RC(ptr_reg));
6232   match(AddP (AddP reg (ConvI2L ireg)) off);
6233   op_cost(INSN_COST);
6234   format %{ "$reg, $ireg, $off I2L" %}
6235   interface(MEMORY_INTER) %{
6236     base($reg);
6237     index($ireg);
6238     scale(0x0);
6239     disp($off);
6240   %}
6241 %}
6242 
// Memory operand: base + (sign-extended int index << scale) + unsigned
// 12-bit immediate offset.  Folds both the ConvI2L and the shift into
// the addressing mode.
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// Memory operand: base + (sign-extended int index << scale), no
// displacement.  op_cost(0) makes the matcher prefer folding this
// into the load/store over materializing the address separately.
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Memory operand: base + (long index << scale), no displacement.
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
6284 
// Memory operand: base + long index register, no scale, no
// displacement.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Memory operand: base + int immediate offset.  index(0xffffffff)
// is the ADLC convention for "no index register".
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Memory operand: base + long immediate offset.
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6326 
6327 
// Narrow-oop variants of the memory operands above.  Each matches a
// DecodeN of the compressed base, and each is guarded by
// narrow_oop_shift() == 0 so the compressed value can be used as the
// base address directly (only a base-offset adjustment, no shift).

// Indirect through a narrow (compressed) oop register.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + (long index << scale) + int unsigned 12-bit offset.
// NOTE(review): op_cost(0) here vs op_cost(INSN_COST) in the LN
// variant below and in the non-narrow sibling — confirm intended.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + (long index << scale) + long unsigned 12-bit offset.
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}
6372 
// Narrow base + sign-extended int index + long unsigned 12-bit offset.
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + (sign-extended int index << scale) + long unsigned
// 12-bit offset.
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + (sign-extended int index << scale), no displacement.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}
6417 
// Narrow base + (long index << scale), no displacement.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + long index register, no scale, no displacement.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + int immediate offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + long immediate offset.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6477 
6478 
6479 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// Address of the last-Java-pc slot: thread register + fixed offset
// (immL_pc_off).  Used only by the opto stubs, not general code.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6494 
6495 //----------Special Memory Operands--------------------------------------------
6496 // Stack Slot Operand - This operand is used for loading and storing temporary
6497 //                      values on the stack where a match requires a value to
6498 //                      flow through memory.
// Stack slot holding a pointer.  The register-allocator-assigned
// stack offset is reported through disp(); base 0x1e encodes the
// stack pointer (see the reg_def encodings at the top of the file).
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding an int.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6527 
// Stack slot holding a float.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a long.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6569 
6570 // Operands for expressing Control Flow
6571 // NOTE: Label is a predefined operand which should not be redefined in
6572 //       the AD file. It is generically handled within the ADLC.
6573 
6574 //----------Conditional Branch Operands----------------------------------------
6575 // Comparison Op  - This is the operation of the comparison, and is limited to
6576 //                  the following set of codes:
6577 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6578 //
6579 // Other attributes of the comparison, such as unsignedness, are specified
6580 // by the comparison instruction that sets a condition code flags register.
6581 // That result is represented by a flags operand whose subtype is appropriate
6582 // to the unsignedness (etc.) of the comparison.
6583 //
6584 // Later, the instruction which matches both the Comparison Op (a Bool) and
6585 // the flags (produced by the Cmp) specifies the coding of the comparison op
6586 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6587 
// used for signed integral comparisons and fp comparisons

// The hex values are the ARMv8 condition-code encodings placed in the
// cond field of conditional instructions (eq=0000, ne=0001, ...,
// lt=1011, gt=1100, le=1101, vs=0110, vc=0111).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

// Same as cmpOp but the ordering conditions use the unsigned
// ARMv8 codes: lo=0011, hs=0010, ls=1001, hi=1000.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6625 
6626 // Special operand allowing long args to int ops to be truncated for free
6627 
// Wraps a long register whose ConvL2I truncation can be absorbed by a
// 32-bit consumer; op_cost(0) means the truncation itself is free.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Addressing modes usable by vector load/store instructions.
opclass vmem(indirect, indIndex, indOffI, indOffL);
6640 
6641 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
6643 // instruction definitions by not requiring the AD writer to specify
6644 // separate instructions for every form of operand when the
6645 // instruction accepts multiple operand types with the same basic
6646 // encoding and format. The classic case of this is memory operands.
6647 
6648 // memory is used to define read/write location for load/store
6649 // instruction defs. we can turn a memory op into an Address
6650 
// All scalar load/store addressing modes: the plain pointer-base
// operands on the first line, their narrow-oop (DecodeN) twins on the
// second.
opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6669 
6670 //----------PIPELINE-----------------------------------------------------------
6671 // Rules which define the behavior of the target architectures pipeline.
6672 // Integer ALU reg operation
6673 pipeline %{
6674 
// Global properties of the pipeline model (sized for Cortex-A53/A57
// class cores, per the bundle-size comment below).
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6687 
6688 // We don't use an actual pipeline model so don't care about resources
6689 // or description. we do use pipeline classes to introduce fixed
6690 // latencies
6691 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 / ALU are "either" groupings built from the individual issue
// slots and ALUs so a pipe_class can claim one of a pair.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Four stages: issue, two execute stages, write-back.
pipe_desc(ISS, EX1, EX2, WR);
6707 
6708 //----------PIPELINE CLASSES---------------------------------------------------
6709 // Pipeline Classes describe the stages in which input and output are
6710 // referenced by the hardware pipeline.
6711 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read); // shifted operand needed a stage earlier
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1; // NOTE(review): comment above says result in EX2 but the ALU is claimed in EX1 — confirm intended
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6809 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6836 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6874 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6927 
//------- Divide pipeline operations --------------------

// 32-bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6953 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6987 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read); // stored data not needed until EX2
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read); // address register
  src    : EX2(read); // stored data
  INS01  : ISS;
  LDST   : WR;
%}
7021 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7050 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7074 
// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7114 
7115 %}
7116 //----------INSTRUCTIONS-------------------------------------------------------
7117 //
7118 // match      -- States which machine-independent subtree may be replaced
7119 //               by this instruction.
7120 // ins_cost   -- The estimated cost of this instruction is used by instruction
7121 //               selection to identify a minimum cost tree of machine
7122 //               instructions that matches a tree of machine-independent
7123 //               instructions.
7124 // format     -- A string providing the disassembly for this instruction.
7125 //               The value of an instruction's operand may be inserted
7126 //               by referring to it with a '$' prefix.
7127 // opcode     -- Three instruction opcodes may be provided.  These are referred
7128 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7130 //               indicate the type of machine instruction, while secondary
7131 //               and tertiary are often used for prefix options or addressing
7132 //               modes.
7133 // ins_encode -- A list of encode classes with parameters. The encode class
7134 //               name must have been defined in an 'enc_class' specification
7135 //               in the encode section of the architecture description.
7136 
7137 // ============================================================================
7138 // Memory (Load/Store) Instructions
7139 
7140 // Load Instructions
7141 
// Load Byte (8 bit signed)
// The !needs_acquiring_load predicate keeps plain loads separate from
// acquiring (volatile) loads — presumably matched by other rules; the
// same pattern applies to all plain loads below.
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7197 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7253 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 32-bit mask marks a zero-extending load, so a
// plain ldrw (which zero-extends to 64 bits) suffices.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // fixed disassembly annotation: this is a 64-bit long load, not int
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7309 
// Load Range
// Loads an array-length field; no acquiring predicate because array
// lengths are immutable once published.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7364 
7365 // Load Narrow Klass Pointer
7366 instruct loadNKlass(iRegNNoSp dst, memory mem)
7367 %{
7368   match(Set dst (LoadNKlass mem));
7369   predicate(!needs_acquiring_load(n));
7370 
7371   ins_cost(4 * INSN_COST);
7372   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
7373 
7374   ins_encode(aarch64_enc_ldrw(dst, mem));
7375 
7376   ins_pipe(iload_reg_mem);
7377 %}
7378 
7379 // Load Float
7380 instruct loadF(vRegF dst, memory mem)
7381 %{
7382   match(Set dst (LoadF mem));
7383   predicate(!needs_acquiring_load(n));
7384 
7385   ins_cost(4 * INSN_COST);
7386   format %{ "ldrs  $dst, $mem\t# float" %}
7387 
7388   ins_encode( aarch64_enc_ldrs(dst, mem) );
7389 
7390   ins_pipe(pipe_class_memory);
7391 %}
7392 
7393 // Load Double
7394 instruct loadD(vRegD dst, memory mem)
7395 %{
7396   match(Set dst (LoadD mem));
7397   predicate(!needs_acquiring_load(n));
7398 
7399   ins_cost(4 * INSN_COST);
7400   format %{ "ldrd  $dst, $mem\t# double" %}
7401 
7402   ins_encode( aarch64_enc_ldrd(dst, mem) );
7403 
7404   ins_pipe(pipe_class_memory);
7405 %}
7406 
7407 
// Load Int Constant
// Materializes a 32 bit immediate with a (possibly multi-insn) movw sequence.
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Cost reflects the up-to-4-instruction mov/movk sequence for a full
// 64 bit pointer immediate.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7463 
// Load Pointer Constant One
// Used e.g. for distinguished sentinel pointer value 1.
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed disassembly comment: this loads pointer constant 1, not NULL
  // (the "# NULL ptr" text was copy-pasted from loadConP0).
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7477 
// Load Poll Page Constant
// The polling page address is materialized with a pc-relative adr.
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant
// Card-table byte map base, also reachable via adr.
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
7547 
// Load Packed Float Constant
// "Packed" immediates fit fmov's 8 bit encodable FP immediate form,
// so no constant-table load is needed.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}

// Load Float Constant
// General case: load the float from the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}

// Load Packed Double Constant
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}
7591 
// Load Double Constant
// General case: load the double from the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed disassembly comment: this is a double constant, not a float
  // ("float=$con" was copy-pasted from loadConF).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}
7608 
// Store Instructions

// Store CMS card-mark Immediate
// Plain card-mark store; only matched when the surrounding graph shows the
// StoreStore barrier is unnecessary (see unnecessary_storestore).
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
// Fallback for StoreCM when the barrier cannot be elided: emits
// dmb ishst before the card-mark byte store.
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7653 
7654 
// Store Byte immediate zero: the encoding stores the zero register.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly comment: aarch64_enc_strb0 stores zr, and the old
  // text "rscractch2" was a misspelled wrong register name.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7667 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short immediate zero via the zero register.
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer immediate zero via the zero register.
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
7722 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly comment: this is a 64 bit long store, not an int store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
7736 
// Store Long immediate zero via the zero register.
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly comment: this is a 64 bit long store, not an int store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7750 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
// Immediate NULL pointer store via the zero register.
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store compressed NULL by reusing rheapbase, which holds zero whenever
// both the narrow oop and narrow klass bases are NULL.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7838 
// Store Compressed Klass Pointer
// Reordered match before predicate for consistency with every other
// store rule in this section (no semantic change; adlc accepts either order).
instruct storeNKlass(iRegN src, memory mem)
%{
  match(Set mem (StoreNKlass mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
7852 
// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

// Prefetch for allocation: hint the write-stream into L1.
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7869 
//  ---------------- volatile loads and stores ----------------
// These use acquire/release instructions (ldar*/stlr*), which only take a
// base-register address, hence the indirect (not memory) operand.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7961 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly comment: the encoding emits the sign-extending
  // ldarsh, not the zero-extending ldarh the old format claimed.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7974 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends, so the AndL mask needs no extra instruction.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
8000 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly comment: this is a 64 bit long load, not an int load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
8013 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// Acquiring FP load goes via an integer ldar plus fmov in the encoding.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8065 
// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8105 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly comment: this is a 64 bit long store, not an int store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
8118 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// Releasing FP store goes via fmov plus an integer stlr in the encoding.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8173 
//  ---------------- end of volatile loads and stores ----------------

// ============================================================================
// BSWAP Instructions

// Reverse the byte order of a 32 bit value.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of a 64 bit value.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of an unsigned 16 bit value (no extension needed).
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a signed 16 bit value, then sign-extend bits 0..15.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8232 
8233 // ============================================================================
8234 // Zero Count Instructions
8235 
8236 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8237   match(Set dst (CountLeadingZerosI src));
8238 
8239   ins_cost(INSN_COST);
8240   format %{ "clzw  $dst, $src" %}
8241   ins_encode %{
8242     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
8243   %}
8244 
8245   ins_pipe(ialu_reg);
8246 %}
8247 
8248 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
8249   match(Set dst (CountLeadingZerosL src));
8250 
8251   ins_cost(INSN_COST);
8252   format %{ "clz   $dst, $src" %}
8253   ins_encode %{
8254     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
8255   %}
8256 
8257   ins_pipe(ialu_reg);
8258 %}
8259 
8260 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8261   match(Set dst (CountTrailingZerosI src));
8262 
8263   ins_cost(INSN_COST * 2);
8264   format %{ "rbitw  $dst, $src\n\t"
8265             "clzw   $dst, $dst" %}
8266   ins_encode %{
8267     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
8268     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
8269   %}
8270 
8271   ins_pipe(ialu_reg);
8272 %}
8273 
8274 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
8275   match(Set dst (CountTrailingZerosL src));
8276 
8277   ins_cost(INSN_COST * 2);
8278   format %{ "rbit   $dst, $src\n\t"
8279             "clz    $dst, $dst" %}
8280   ins_encode %{
8281     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
8282     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
8283   %}
8284 
8285   ins_pipe(ialu_reg);
8286 %}
8287 
//---------- Population Count Instructions -------------------------------------
//
// No scalar popcount on AArch64: move to a SIMD register, use the vector
// cnt instruction, then horizontally add the byte counts.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this movw writes back into $src to clear its upper
    // 32 bits, i.e. it modifies an input register without an effect()
    // declaration claiming it — verify the allocator tolerates this.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Popcount of an int loaded directly from memory: load straight into the
// SIMD register, skipping the GPR round trip.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Popcount of a long loaded directly from memory.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8377 
8378 // ============================================================================
8379 // MemBar Instruction
8380 
8381 instruct load_fence() %{
8382   match(LoadFence);
8383   ins_cost(VOLATILE_REF_COST);
8384 
8385   format %{ "load_fence" %}
8386 
8387   ins_encode %{
8388     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8389   %}
8390   ins_pipe(pipe_serial);
8391 %}
8392 
8393 instruct unnecessary_membar_acquire() %{
8394   predicate(unnecessary_acquire(n));
8395   match(MemBarAcquire);
8396   ins_cost(0);
8397 
8398   format %{ "membar_acquire (elided)" %}
8399 
8400   ins_encode %{
8401     __ block_comment("membar_acquire (elided)");
8402   %}
8403 
8404   ins_pipe(pipe_class_empty);
8405 %}
8406 
8407 instruct membar_acquire() %{
8408   match(MemBarAcquire);
8409   ins_cost(VOLATILE_REF_COST);
8410 
8411   format %{ "membar_acquire" %}
8412 
8413   ins_encode %{
8414     __ block_comment("membar_acquire");
8415     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8416   %}
8417 
8418   ins_pipe(pipe_serial);
8419 %}
8420 
8421 
8422 instruct membar_acquire_lock() %{
8423   match(MemBarAcquireLock);
8424   ins_cost(VOLATILE_REF_COST);
8425 
8426   format %{ "membar_acquire_lock (elided)" %}
8427 
8428   ins_encode %{
8429     __ block_comment("membar_acquire_lock (elided)");
8430   %}
8431 
8432   ins_pipe(pipe_serial);
8433 %}
8434 
8435 instruct store_fence() %{
8436   match(StoreFence);
8437   ins_cost(VOLATILE_REF_COST);
8438 
8439   format %{ "store_fence" %}
8440 
8441   ins_encode %{
8442     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8443   %}
8444   ins_pipe(pipe_serial);
8445 %}
8446 
8447 instruct unnecessary_membar_release() %{
8448   predicate(unnecessary_release(n));
8449   match(MemBarRelease);
8450   ins_cost(0);
8451 
8452   format %{ "membar_release (elided)" %}
8453 
8454   ins_encode %{
8455     __ block_comment("membar_release (elided)");
8456   %}
8457   ins_pipe(pipe_serial);
8458 %}
8459 
8460 instruct membar_release() %{
8461   match(MemBarRelease);
8462   ins_cost(VOLATILE_REF_COST);
8463 
8464   format %{ "membar_release" %}
8465 
8466   ins_encode %{
8467     __ block_comment("membar_release");
8468     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8469   %}
8470   ins_pipe(pipe_serial);
8471 %}
8472 
// Store-store barrier (dmb ishst equivalent).
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock-release barrier: always elided here (see membar_acquire_lock);
// only a comment is emitted.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full (StoreLoad) barrier elided when surrounding acquire/release
// instructions already give the ordering (see unnecessary_volatile).
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Fallback full barrier; StoreLoad is the expensive one, hence the
// deliberately huge cost to steer the matcher toward the elided form.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8525 
8526 // ============================================================================
8527 // Cast/Convert Instructions
8528 
// CastX2P: reinterpret a long as a pointer.  A plain register move,
// elided entirely when the allocator assigned src and dst the same
// register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// CastP2X: reinterpret a pointer as a long (mirror of castX2P above).
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// The 32-bit move (movw) keeps only the low word of the pointer, which
// is all an alignment mask needs.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8571 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // Only valid when compressed oops are unshifted, i.e. the narrow oop
  // is literally the low 32 bits of the address, so a 32-bit register
  // move is sufficient.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed: the format previously printed the literal text "dst"
  // (missing '$' operand reference) and said "mov" although the
  // encoding emits movw; now consistent with convP2I above.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8587 
8588 
// Convert oop pointer into compressed form
// Heap-oop encode/decode.  The _not_null variants skip the null check
// performed by the general MacroAssembler helpers; the predicates on
// the node's pointer type select between the two forms.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// NOTE(review): cr is declared as an operand here but, unlike
// encodeHeapOop above, no effect(KILL cr) is given -- verify that
// encode_heap_oop_not_null really leaves the flags untouched.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// General decode: used when the oop may be null (neither known
// not-null nor a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decode when the oop is known not-null or constant: complement of the
// predicate on decodeHeapOop above, so exactly one rule applies.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8642 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always known not-null).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (always known not-null).
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The MacroAssembler provides a distinct in-place (single-register)
    // variant for the dst == src case.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8680 
// CheckCastPP/CastPP/CastII are compile-time type-pinning nodes: they
// generate no machine code (size(0), empty encoding) and read/write
// the same register in place.

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8711 
8712 // ============================================================================
8713 // Atomic operation instructions
8714 //
8715 // Intel and SPARC both implement Ideal Node LoadPLocked and
8716 // Store{PIL}Conditional instructions using a normal load for the
8717 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8718 //
8719 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8720 // pair to lock object allocations from Eden space when not using
8721 // TLABs.
8722 //
8723 // There does not appear to be a Load{IL}Locked Ideal Node and the
8724 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8725 // and to use StoreIConditional only for 32-bit and StoreLConditional
8726 // only for 64-bit.
8727 //
8728 // We implement LoadPLocked and StorePLocked instructions using,
8729 // respectively the AArch64 hw load-exclusive and store-conditional
8730 // instructions. Whereas we must implement each of
8731 // Store{IL}Conditional using a CAS which employs a pair of
8732 // instructions comprising a load-exclusive followed by a
8733 // store-conditional.
8734 
8735 
// Locked-load (linked load) of the current heap-top
// used when updating the eden heap top
// implemented using ldaxr on AArch64

// ldaxr = load-exclusive with acquire semantics; pairs with the stlxr
// in storePConditional below to form the heap-top update loop.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8752 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// NOTE(review): oldval is an operand of the ideal node but is not used
// by the encoding -- the exclusive monitor set up by the preceding
// ldaxr (loadPLocked) provides the compare part.  Confirm against the
// aarch64_enc_stlxr definition.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8777 
8778 
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  // Full 64-bit acquiring compare-and-exchange; flags end up EQ on
  // success, which is the node's only output.
  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8817 
// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate

// Each rule emits a cmpxchg(w) loop followed by cset to materialize
// the boolean success result in $res; the flags are clobbered (KILL cr).

// 32-bit int CAS.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// 64-bit long CAS.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Full-width pointer CAS.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow (compressed) oop CAS -- 32-bit wide, hence cmpxchgw.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8895 
// alternative CompareAndSwapX when we are eliding barriers

// These variants are guarded by needs_acquiring_load_exclusive(n) and
// use the _acq encodings (acquiring load-exclusive), allowing the
// separate barrier to be elided.  Note the cost is half that of the
// plain rules above.

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8973 
8974 
// GetAndSetX: atomic exchange.  $prev receives the old memory value;
// the w-suffixed helper is the 32-bit form (int and narrow oop).

instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9010 
9011 
// GetAndAddX family: atomic fetch-and-add.  Four variants per width:
// register or immediate increment, crossed with whether the old value
// is consumed.  The _no_res forms are guarded by result_not_used() and
// pass noreg so no result register is written; their slightly lower
// cost (9 vs 10) makes them preferred when applicable.

instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment form; immLAddSub restricts the constant to the
// AArch64 add/sub immediate range.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit (int) variants use the w-suffixed helper.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9095 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Sequence: cmp sets flags; csetw writes 1 if NE else 0; cnegw then
// negates that when LT, yielding -1/0/1.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// Same as above but with an add/sub-encodable immediate for src2.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // AArch64 add/sub immediates are unsigned, so a negative constant
    // is compared by adding its negation instead of subtracting it.
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9143 
9144 // ============================================================================
9145 // Conditional Move Instructions
9146 
9147 // n.b. we have identical rules for both a signed compare op (cmpOp)
9148 // and an unsigned compare op (cmpOpU). it would be nice if we could
9149 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
9151 // opclass does not live up to the COND_INTER interface of its
9152 // component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9154 // which throws a ShouldNotHappen. So, we have to provide two flavours
9155 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9156 
// CMoveI via csel: cselw writes $src2 when the condition holds and
// $src1 otherwise (note the operand order in the encoding).

instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour (cmpOpU / rFlagsRegU); see the note above
// on why cmpOp and cmpOpU need separate rules.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9188 
// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Zero-on-the-left: dst = cond ? src : 0 (zr substituted for the
// constant zero operand).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero-on-the-right: dst = cond ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9261 
// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// csincw dst, zr, zr, cond computes cond ? 0 : 0+1, i.e. 1 when the
// condition fails -- matching CMove(one, zero) operand order.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9304 
// CMoveL rules: 64-bit csel analogues of the CMoveI rules above, with
// the same signed/unsigned pairing and zero-operand special cases.

instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9402 
9403 instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9404   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9405 
9406   ins_cost(INSN_COST * 2);
9407   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}
9408 
9409   ins_encode %{
9410     __ csel(as_Register($dst$$reg),
9411             as_Register($src2$$reg),
9412             as_Register($src1$$reg),
9413             (Assembler::Condition)$cmp$$cmpcode);
9414   %}
9415 
9416   ins_pipe(icond_reg_reg);
9417 %}
9418 
9419 instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9420   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9421 
9422   ins_cost(INSN_COST * 2);
9423   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}
9424 
9425   ins_encode %{
9426     __ csel(as_Register($dst$$reg),
9427             as_Register($src2$$reg),
9428             as_Register($src1$$reg),
9429             (Assembler::Condition)$cmp$$cmpcode);
9430   %}
9431 
9432   ins_pipe(icond_reg_reg);
9433 %}
9434 
9435 // special cases where one arg is zero
9436 
9437 instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
9438   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
9439 
9440   ins_cost(INSN_COST * 2);
9441   format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}
9442 
9443   ins_encode %{
9444     __ csel(as_Register($dst$$reg),
9445             zr,
9446             as_Register($src$$reg),
9447             (Assembler::Condition)$cmp$$cmpcode);
9448   %}
9449 
9450   ins_pipe(icond_reg);
9451 %}
9452 
9453 instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
9454   match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
9455 
9456   ins_cost(INSN_COST * 2);
9457   format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}
9458 
9459   ins_encode %{
9460     __ csel(as_Register($dst$$reg),
9461             zr,
9462             as_Register($src$$reg),
9463             (Assembler::Condition)$cmp$$cmpcode);
9464   %}
9465 
9466   ins_pipe(icond_reg);
9467 %}
9468 
// Pointer cmove with a null first operand: dst = $cmp(cr) ? src : 0.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9484 
// Unsigned-compare variant of cmovP_zero_reg: dst = $cmp(cr) ? src : 0.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9500 
// Conditional move of compressed oops (narrow ptrs), signed compare:
// dst = $cmp(cr) ? src2 : src1. Uses the 32-bit CSELW form.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9516 
// Conditional move of compressed oops (narrow ptrs), unsigned compare:
// dst = $cmp(cr) ? src2 : src1. Uses the 32-bit CSELW form.
// Fix: the format comment said "signed" although this is the unsigned
// (cmpOpU / rFlagsRegU) variant, which made -XX:+PrintAssembly output
// indistinguishable from cmovN_reg_reg.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9532 
9533 // special cases where one arg is zero
9534 
// Narrow-ptr cmove with a null second operand: dst = $cmp(cr) ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9550 
// Unsigned-compare variant of cmovN_reg_zero: dst = $cmp(cr) ? 0 : src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9566 
// Narrow-ptr cmove with a null first operand: dst = $cmp(cr) ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9582 
// Unsigned-compare variant of cmovN_zero_reg: dst = $cmp(cr) ? src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9598 
// Conditional move of floats, signed compare: dst = $cmp(cr) ? src2 : src1.
// FCSEL selects its first source when the condition holds, hence $src2 first.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9616 
// Unsigned-compare variant of cmovF_reg: dst = $cmp(cr) ? src2 : src1.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9634 
// Conditional move of doubles, signed compare: dst = $cmp(cr) ? src2 : src1.
// Fix: the format comment said "cmove float" although this rule matches
// CMoveD and emits the double-precision FCSEL (fcseld).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9652 
// Conditional move of doubles, unsigned compare: dst = $cmp(cr) ? src2 : src1.
// Fix: the format comment said "cmove float" although this rule matches
// CMoveD and emits the double-precision FCSEL (fcseld).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9670 
9671 // ============================================================================
9672 // Arithmetic Instructions
9673 //
9674 
9675 // Integer Addition
9676 
9677 // TODO
9678 // these currently employ operations which do not set CR and hence are
9679 // not flagged as killing CR but we would like to isolate the cases
9680 // where we want to set flags from those where we don't. need to work
9681 // out how to do that.
9682 
// 32-bit integer add, register-register: dst = src1 + src2 (ADDW).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9697 
// 32-bit integer add, register-immediate: dst = src1 + src2.
// Shares the add/sub immediate encoder; opcode 0x0 selects "add".
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9711 
// 32-bit add of a long truncated to int plus an immediate.
// The ConvL2I is free: ADDW only reads the low 32 bits of src1 anyway.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9725 
9726 // Pointer Addition
// Pointer add: dst = src1 + src2 (64-bit ADD).
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9741 
// Pointer add with a sign-extended int offset: dst = src1 + sxtw(src2).
// Folds the ConvI2L into the ADD's sxtw extend, saving an instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
9756 
// Pointer add with a scaled long index: dst = src1 + (src2 << scale).
// Emitted via lea with a shifted-register address, folding the shift.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9771 
// Pointer add with a sign-extended, scaled int index:
// dst = src1 + (sxtw(src2) << scale), folding both extend and shift.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9786 
// Left shift of a sign-extended int: dst = sxtw(src) << scale, done in one
// SBFIZ. The width is capped at MIN(32, 64 - shift) since only 32 source
// bits are meaningful after ConvI2L.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9801 
9802 // Pointer Immediate Addition
9803 // n.b. this needs to be more expensive than using an indirect memory
9804 // operand
// Pointer add with an immediate offset: dst = src1 + src2.
// Shares the 64-bit add/sub immediate encoder; opcode 0x0 selects "add".
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9818 
9819 // Long Addition
// 64-bit integer add, register-register: dst = src1 + src2.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9835 
// Long Immediate Addition. No constant pool entries required.
// 64-bit integer add, register-immediate: dst = src1 + src2.
// immLAddSub guarantees the value fits the add/sub immediate encoding,
// so no constant pool entry is needed.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9850 
9851 // Integer Subtraction
// 32-bit integer subtract, register-register: dst = src1 - src2 (SUBW).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9866 
9867 // Immediate Subtraction
// 32-bit integer subtract, register-immediate: dst = src1 - src2.
// Shares the add/sub immediate encoder; opcode 0x1 selects "sub".
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9881 
9882 // Long Subtraction
// 64-bit integer subtract, register-register: dst = src1 - src2.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9898 
// Long Immediate Subtraction. No constant pool entries required.
// 64-bit integer subtract, register-immediate: dst = src1 - src2.
// Shares the 64-bit add/sub immediate encoder; opcode 0x1 selects "sub".
// Fix: the format string read "sub$dst" (missing separator), producing
// garbled -XX:+PrintAssembly output; now matches the sibling rules.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9913 
9914 // Integer Negation (special case for sub)
9915 
// 32-bit negate, matched as 0 - src: dst = -src (NEGW).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9929 
9930 // Long Negation
9931 
// 64-bit negate, matched as 0 - src: dst = -src (NEG).
// NOTE(review): src is typed iRegIorL2I although this is a long (64-bit)
// negate; the 32-bit sibling negI_reg uses the same operand type. Confirm
// this should not be iRegL as in the other *L rules.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9945 
9946 // Integer Multiply
9947 
// 32-bit integer multiply: dst = src1 * src2 (MULW).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9962 
// Widening 32x32->64 signed multiply: matches (long)src1 * (long)src2
// when both operands are sign-extended ints, emitted as a single SMULL.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9977 
9978 // Long Multiply
9979 
// 64-bit integer multiply: dst = src1 * src2 (low 64 bits).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9994 
// High 64 bits of a signed 64x64 multiply: dst = (src1 * src2) >> 64,
// via SMULH.
// Fix: removed the stray ", " before the tab in the format string, which
// printed as "smulh x0, x1, x2, # mulhi" in -XX:+PrintAssembly output.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10010 
10011 // Combined Integer Multiply & Add/Sub
10012 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2 (MADDW).
// Fix: format showed the 64-bit mnemonic "madd" although the encoding
// emits the 32-bit maddw.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10028 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2 (MSUBW).
// Fix: format showed the 64-bit mnemonic "msub" although the encoding
// emits the 32-bit msubw.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10044 
10045 // Combined Long Multiply & Add/Sub
10046 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2 (MADD).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10062 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2 (MSUB).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10078 
10079 // Integer Divide
10080 
// 32-bit signed divide: dst = src1 / src2, via the shared sdivw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10090 
// Sign-bit extract: (src1 >> 31) >>> 31 == src1 >>> 31, collapsed to one
// LSRW. Both shift counts are pinned to 31 by the immI_31 operands, which
// is why the encoding can hard-code 31.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10100 
// Rounding adjustment for signed div-by-2^k: dst = src + (src >>> 31),
// folded into one ADDW with an LSR #31 shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10114 
10115 // Long Divide
10116 
// 64-bit signed divide: dst = src1 / src2, via the shared sdiv encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10126 
// 64-bit sign-bit extract: (src1 >> 63) >>> 63 == src1 >>> 63, collapsed
// to one LSR. Shift counts are pinned to 63 by the immL_63 operands.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10136 
// 64-bit rounding adjustment for signed div-by-2^k: dst = src + (src >>> 63),
// folded into one ADD with an LSR #63 shifted operand.
// Fix: the format omitted the "LSR" shift annotation and read like a plain
// add with immediate; now parallel to the 32-bit twin div2Round.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10150 
10151 // Integer Remainder
10152 
// 32-bit signed remainder, lowered to divide + multiply-subtract:
//   rscratch1 = src1 / src2;  dst = src1 - rscratch1 * src2
// Fix: the format's second line was garbled ("msubw($dst, ... $src1" with a
// stray unbalanced paren); rewritten as a plain instruction mnemonic.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10163 
10164 // Long Remainder
10165 
// 64-bit signed remainder, lowered to divide + multiply-subtract:
//   rscratch1 = src1 / src2;  dst = src1 - rscratch1 * src2
// Fix: the format's second line was garbled ("msub($dst, ... $src1" with a
// stray unbalanced paren) and the line break lacked the "\t" used by modI;
// both normalized.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10176 
10177 // Integer Shifts
10178 
10179 // Shift Left Register
// 32-bit shift left by a register count (LSLVW); the hardware masks the
// count to 5 bits, matching Java's int-shift semantics.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10194 
10195 // Shift Left Immediate
// 32-bit shift left by an immediate; the count is masked to 5 bits
// explicitly to match Java's int-shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10210 
10211 // Shift Right Logical Register
// 32-bit logical shift right by a register count (LSRVW).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10226 
10227 // Shift Right Logical Immediate
// 32-bit logical shift right by an immediate; count masked to 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10242 
10243 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right by a register count (ASRVW).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10258 
10259 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by an immediate; count masked to 5 bits.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10274 
10275 // Combined Int Mask and Right Shift (using UBFM)
10276 // TODO
10277 
10278 // Long Shifts
10279 
10280 // Shift Left Register
// 64-bit shift left by a register count (LSLV).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10295 
10296 // Shift Left Immediate
// 64-bit shift left by an immediate; count masked to 6 bits to match
// Java's long-shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10311 
10312 // Shift Right Logical Register
// 64-bit logical shift right by a register count (LSRV).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10327 
10328 // Shift Right Logical Immediate
// 64-bit logical shift right by an immediate; count masked to 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10343 
10344 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long (CastP2X),
// matched specially so card-table address computations stay one LSR.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10359 
10360 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by a register count (ASRV).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10375 
10376 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by an immediate; count masked to 6 bits.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10391 
10392 // BEGIN This section of the file is automatically generated. Do not edit --------------
10393 
10394 instruct regL_not_reg(iRegLNoSp dst,
10395                          iRegL src1, immL_M1 m1,
10396                          rFlagsReg cr) %{
10397   match(Set dst (XorL src1 m1));
10398   ins_cost(INSN_COST);
10399   format %{ "eon  $dst, $src1, zr" %}
10400 
10401   ins_encode %{
10402     __ eon(as_Register($dst$$reg),
10403               as_Register($src1$$reg),
10404               zr,
10405               Assembler::LSL, 0);
10406   %}
10407 
10408   ins_pipe(ialu_reg);
10409 %}
10410 instruct regI_not_reg(iRegINoSp dst,
10411                          iRegIorL2I src1, immI_M1 m1,
10412                          rFlagsReg cr) %{
10413   match(Set dst (XorI src1 m1));
10414   ins_cost(INSN_COST);
10415   format %{ "eonw  $dst, $src1, zr" %}
10416 
10417   ins_encode %{
10418     __ eonw(as_Register($dst$$reg),
10419               as_Register($src1$$reg),
10420               zr,
10421               Assembler::LSL, 0);
10422   %}
10423 
10424   ins_pipe(ialu_reg);
10425 %}
10426 
10427 instruct AndI_reg_not_reg(iRegINoSp dst,
10428                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10429                          rFlagsReg cr) %{
10430   match(Set dst (AndI src1 (XorI src2 m1)));
10431   ins_cost(INSN_COST);
10432   format %{ "bicw  $dst, $src1, $src2" %}
10433 
10434   ins_encode %{
10435     __ bicw(as_Register($dst$$reg),
10436               as_Register($src1$$reg),
10437               as_Register($src2$$reg),
10438               Assembler::LSL, 0);
10439   %}
10440 
10441   ins_pipe(ialu_reg_reg);
10442 %}
10443 
10444 instruct AndL_reg_not_reg(iRegLNoSp dst,
10445                          iRegL src1, iRegL src2, immL_M1 m1,
10446                          rFlagsReg cr) %{
10447   match(Set dst (AndL src1 (XorL src2 m1)));
10448   ins_cost(INSN_COST);
10449   format %{ "bic  $dst, $src1, $src2" %}
10450 
10451   ins_encode %{
10452     __ bic(as_Register($dst$$reg),
10453               as_Register($src1$$reg),
10454               as_Register($src2$$reg),
10455               Assembler::LSL, 0);
10456   %}
10457 
10458   ins_pipe(ialu_reg_reg);
10459 %}
10460 
10461 instruct OrI_reg_not_reg(iRegINoSp dst,
10462                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10463                          rFlagsReg cr) %{
10464   match(Set dst (OrI src1 (XorI src2 m1)));
10465   ins_cost(INSN_COST);
10466   format %{ "ornw  $dst, $src1, $src2" %}
10467 
10468   ins_encode %{
10469     __ ornw(as_Register($dst$$reg),
10470               as_Register($src1$$reg),
10471               as_Register($src2$$reg),
10472               Assembler::LSL, 0);
10473   %}
10474 
10475   ins_pipe(ialu_reg_reg);
10476 %}
10477 
10478 instruct OrL_reg_not_reg(iRegLNoSp dst,
10479                          iRegL src1, iRegL src2, immL_M1 m1,
10480                          rFlagsReg cr) %{
10481   match(Set dst (OrL src1 (XorL src2 m1)));
10482   ins_cost(INSN_COST);
10483   format %{ "orn  $dst, $src1, $src2" %}
10484 
10485   ins_encode %{
10486     __ orn(as_Register($dst$$reg),
10487               as_Register($src1$$reg),
10488               as_Register($src2$$reg),
10489               Assembler::LSL, 0);
10490   %}
10491 
10492   ins_pipe(ialu_reg_reg);
10493 %}
10494 
10495 instruct XorI_reg_not_reg(iRegINoSp dst,
10496                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
10497                          rFlagsReg cr) %{
10498   match(Set dst (XorI m1 (XorI src2 src1)));
10499   ins_cost(INSN_COST);
10500   format %{ "eonw  $dst, $src1, $src2" %}
10501 
10502   ins_encode %{
10503     __ eonw(as_Register($dst$$reg),
10504               as_Register($src1$$reg),
10505               as_Register($src2$$reg),
10506               Assembler::LSL, 0);
10507   %}
10508 
10509   ins_pipe(ialu_reg_reg);
10510 %}
10511 
// dst = src1 ^ ~src2 (64-bit), matched as -1 ^ (src2 ^ src1); one EON.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10528 
// dst = src1 & ~(src2 >>> src3) (32-bit): Xor-with-minus-one NOT plus the
// logical shift both fold into a single BIC with an LSR-shifted operand.
// Shift count is masked to 0..31 to mirror the hardware's modulo behavior.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10546 
// dst = src1 & ~(src2 >>> src3) (64-bit): folds NOT and shift into one BIC;
// shift count masked to 0..63.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10564 
// dst = src1 & ~(src2 >> src3) (32-bit, arithmetic shift): one BIC with ASR operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10582 
// dst = src1 & ~(src2 >> src3) (64-bit, arithmetic shift): one BIC with ASR operand.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10600 
// dst = src1 & ~(src2 << src3) (32-bit): one BIC with LSL operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10618 
// dst = src1 & ~(src2 << src3) (64-bit): one BIC with LSL operand.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10636 
// dst = src1 ^ ~(src2 >>> src3) (32-bit).  Matched as -1 ^ ((src2 >>> src3) ^ src1),
// which is the same value; folds into one EON with an LSR-shifted operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10654 
// dst = src1 ^ ~(src2 >>> src3) (64-bit): one EON with LSR operand.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10672 
// dst = src1 ^ ~(src2 >> src3) (32-bit, arithmetic shift): one EON with ASR operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10690 
// dst = src1 ^ ~(src2 >> src3) (64-bit, arithmetic shift): one EON with ASR operand.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10708 
// dst = src1 ^ ~(src2 << src3) (32-bit): one EON with LSL operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10726 
// dst = src1 ^ ~(src2 << src3) (64-bit): one EON with LSL operand.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10744 
// dst = src1 | ~(src2 >>> src3) (32-bit): NOT and shift fold into one ORN
// with an LSR-shifted operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10762 
// dst = src1 | ~(src2 >>> src3) (64-bit): one ORN with LSR operand.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10780 
// dst = src1 | ~(src2 >> src3) (32-bit, arithmetic shift): one ORN with ASR operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10798 
// dst = src1 | ~(src2 >> src3) (64-bit, arithmetic shift): one ORN with ASR operand.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10816 
// dst = src1 | ~(src2 << src3) (32-bit): one ORN with LSL operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10834 
// dst = src1 | ~(src2 << src3) (64-bit): one ORN with LSL operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10852 
// dst = src1 & (src2 >>> src3) (32-bit): merge the constant shift into the
// AND's shifted-register operand — one instruction instead of shift + and.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10871 
// dst = src1 & (src2 >>> src3) (64-bit).  "andr" is the assembler's name for
// the AND instruction (avoids clashing with the C++ keyword "and").
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10890 
// dst = src1 & (src2 >> src3) (32-bit, arithmetic shift) in one AND with ASR operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10909 
// dst = src1 & (src2 >> src3) (64-bit, arithmetic shift) in one AND with ASR operand.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10928 
// dst = src1 & (src2 << src3) (32-bit) in one AND with LSL operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10947 
// dst = src1 & (src2 << src3) (64-bit) in one AND with LSL operand.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10966 
// dst = src1 ^ (src2 >>> src3) (32-bit) in one EOR with LSR operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10985 
// dst = src1 ^ (src2 >>> src3) (64-bit) in one EOR with LSR operand.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11004 
// dst = src1 ^ (src2 >> src3) (32-bit, arithmetic shift) in one EOR with ASR operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11023 
// dst = src1 ^ (src2 >> src3) (64-bit, arithmetic shift) in one EOR with ASR operand.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11042 
// dst = src1 ^ (src2 << src3) (32-bit) in one EOR with LSL operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11061 
// dst = src1 ^ (src2 << src3) (64-bit) in one EOR with LSL operand.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11080 
// dst = src1 | (src2 >>> src3) (32-bit) in one ORR with LSR operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11099 
// dst = src1 | (src2 >>> src3) (64-bit) in one ORR with LSR operand.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11118 
// dst = src1 | (src2 >> src3) (32-bit, arithmetic shift) in one ORR with ASR operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11137 
// dst = src1 | (src2 >> src3) (64-bit, arithmetic shift) in one ORR with ASR operand.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11156 
// dst = src1 | (src2 << src3) (32-bit) in one ORR with LSL operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11175 
// dst = src1 | (src2 << src3) (64-bit) in one ORR with LSL operand.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11194 
// dst = src1 + (src2 >>> src3) (32-bit): fold the constant shift into ADD's
// shifted-register operand — one instruction instead of shift + add.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11213 
// dst = src1 + (src2 >>> src3) (64-bit) in one ADD with LSR operand.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11232 
// dst = src1 + (src2 >> src3) (32-bit, arithmetic shift) in one ADD with ASR operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11251 
// dst = src1 + (src2 >> src3) (64-bit, arithmetic shift) in one ADD with ASR operand.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11270 
// dst = src1 + (src2 << src3) (32-bit) in one ADD with LSL operand
// (common for scaled address/index arithmetic).
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11289 
// dst = src1 + (src2 << src3) (64-bit) in one ADD with LSL operand.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11308 
// dst = src1 - (src2 >>> src3) (32-bit) in one SUB with LSR operand.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11327 
// dst = src1 - (src2 >>> src3) (64-bit) in one SUB with LSR operand.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11346 
// dst = src1 - (src2 >> src3) (32-bit, arithmetic shift) in one SUB with ASR operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11365 
// dst = src1 - (src2 >> src3) (64-bit, arithmetic shift) in one SUB with ASR operand.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11384 
// dst = src1 - (src2 << src3) (32-bit) in one SUB with LSL operand.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11403 
// dst = src1 - (src2 << src3) (64-bit) in one SUB with LSL operand.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11422 
11423 
11424 
11425 // Shift Left followed by Shift Right.
11426 // This idiom is used by the compiler for the i2b bytecode etc.
// (src << L) >> R (arithmetic, 64-bit) collapses to one signed bit-field move:
// immr = (R - L) & 63 gives the rotate, imms = 63 - L marks the highest source
// bit to copy/sign-extend from.  The predicate reads the raw shift counts
// (n->in(2) is R, n->in(1)->in(2) is L) and rejects anything sbfm can't encode.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11447 
11448 // Shift Left followed by Shift Right.
11449 // This idiom is used by the compiler for the i2b bytecode etc.
11450 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11451 %{
11452   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
11453   // Make sure we are not going to exceed what sbfmw can do.
11454   predicate((unsigned int)n->in(2)->get_int() <= 31
11455             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11456 
11457   ins_cost(INSN_COST * 2);
11458   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11459   ins_encode %{
11460     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11461     int s = 31 - lshift;
11462     int r = (rshift - lshift) & 31;
11463     __ sbfmw(as_Register($dst$$reg),
11464             as_Register($src$$reg),
11465             r, s);
11466   %}
11467 
11468   ins_pipe(ialu_reg_shift);
11469 %}
11470 
11471 // Shift Left followed by Shift Right.
11472 // This idiom is used by the compiler for the i2b bytecode etc.
11473 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11474 %{
11475   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
11476   // Make sure we are not going to exceed what ubfm can do.
11477   predicate((unsigned int)n->in(2)->get_int() <= 63
11478             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11479 
11480   ins_cost(INSN_COST * 2);
11481   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11482   ins_encode %{
11483     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11484     int s = 63 - lshift;
11485     int r = (rshift - lshift) & 63;
11486     __ ubfm(as_Register($dst$$reg),
11487             as_Register($src$$reg),
11488             r, s);
11489   %}
11490 
11491   ins_pipe(ialu_reg_shift);
11492 %}
11493 
11494 // Shift Left followed by Shift Right.
11495 // This idiom is used by the compiler for the i2b bytecode etc.
11496 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11497 %{
11498   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11499   // Make sure we are not going to exceed what ubfmw can do.
11500   predicate((unsigned int)n->in(2)->get_int() <= 31
11501             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11502 
11503   ins_cost(INSN_COST * 2);
11504   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11505   ins_encode %{
11506     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11507     int s = 31 - lshift;
11508     int r = (rshift - lshift) & 31;
11509     __ ubfmw(as_Register($dst$$reg),
11510             as_Register($src$$reg),
11511             r, s);
11512   %}
11513 
11514   ins_pipe(ialu_reg_shift);
11515 %}
11516 // Bitfield extract with shift & mask
11517 
// (src >>> rshift) & mask, 32-bit: an unsigned bitfield extract of
// 'width' bits starting at 'rshift', where width = log2(mask + 1).
// The immI_bitmask operand is expected to guarantee mask + 1 is a power
// of two so exact_log2 is well-defined (see the comment before
// ubfxIConvI2L below).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant of the rule above: (src >>> rshift) & mask via ubfx.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends into the upper word, which implements
// the ConvI2L for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11566 
11567 // Rotations
11568 
// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 0 (mod 64)
// is a 64-bit extract: extr dst, src1, src2, #rshift.  When src1 == src2
// this is a rotate right by rshift.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Only applies when the two shift counts sum to the register width.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11583 
// 32-bit variant of extrOrL: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 0 (mod 32) is an extract; rotate right when
// src1 == src2.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // The encoding emits the 32-bit extrw, so print "extrw" (the format
  // previously said "extr", which is the 64-bit mnemonic).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11598 
// Same extract pattern as extrOrL but matching AddL: when the two
// shifted fields cannot overlap (shift counts sum to 64), add and or
// produce the same bits, so extr applies here too.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11613 
// 32-bit variant of extrAddL: AddI of two non-overlapping shifted
// fields (shift counts sum to 32) is an extract.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // The encoding emits the 32-bit extrw, so print "extrw" (the format
  // previously said "extr", which is the 64-bit mnemonic).
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11628 
11629 
11630 // rol expander
11631 
// 64-bit rotate-left by a variable amount.  There is no rolv on
// AArch64; rol(x, n) is implemented as ror(x, -n): negate the shift
// into rscratch1 and use rorv, which takes the amount modulo the
// register width.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rorv by -shift == rolv by shift.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit rotate-left by a variable amount; same negate-then-rorvw trick
// as rolL_rReg above.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11661 
// Rotate-left idiom: (src << shift) | (src >>> (64 - shift)) matches a
// 64-bit rol, implemented via the rolL_rReg expander.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with the constant 0: (0 - shift) is congruent to
// (64 - shift) modulo 64, so this is also a 64-bit rol.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11679 
// 32-bit rotate-left idiom: (src << shift) | (src >>> (32 - shift)).
// Fixed: this int rule previously declared long register operands
// (iRegLNoSp dst, iRegL src) and expanded to the 64-bit rolL_rReg,
// which rotates across the wrong width for an OrI/LShiftI/URShiftI
// (int) match.  It now uses int register operands and expands to the
// 32-bit rolI_rReg.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11688 
// 32-bit rotate-left idiom with constant 0: (0 - shift) is congruent
// to (32 - shift) modulo 32.
// Fixed: as with rolI_rReg_Var_C_32, this int rule previously used
// long operands and the 64-bit rolL_rReg expander; it now uses int
// operands and the 32-bit rolI_rReg.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11697 
11698 // ror expander
11699 
// 64-bit rotate-right by a variable amount: maps directly onto rorv.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit rotate-right by a variable amount: maps directly onto rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11727 
// Rotate-right idiom: (src >>> shift) | (src << (64 - shift)) matches a
// 64-bit ror, implemented via the rorL_rReg expander.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with the constant 0: (0 - shift) is congruent to
// (64 - shift) modulo 64, so this is also a 64-bit ror.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
11745 
// 32-bit rotate-right idiom: (src >>> shift) | (src << (32 - shift)).
// Fixed: this int rule previously declared long register operands
// (iRegLNoSp dst, iRegL src) and expanded to the 64-bit rorL_rReg,
// which rotates across the wrong width for an OrI/URShiftI/LShiftI
// (int) match.  It now uses int register operands and expands to the
// 32-bit rorI_rReg.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11754 
// 32-bit rotate-right idiom with constant 0: (0 - shift) is congruent
// to (32 - shift) modulo 32.
// Fixed: as with rorI_rReg_Var_C_32, this int rule previously used
// long operands and the 64-bit rorL_rReg expander; it now uses int
// operands and the 32-bit rorI_rReg.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11763 
11764 // Add/subtract (extended)
11765 
// Long add of a sign-extended int: AddL src1 (ConvI2L src2) folds the
// widening into the add's extended-register operand (sxtw).
// NOTE(review): the trailing ';' after '%}' below is redundant but
// harmless.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Long subtract of a sign-extended int, using sub's extended-register
// operand (sxtw).
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11791 
11792 
// (src2 << 16) >> 16 is a sign-extended halfword, so the whole AddI
// folds to add with an sxth extended operand.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 24) >> 24 is a sign-extended byte: add with sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 24) >>> 24 is a zero-extended byte: add with uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11831 
// Long forms of the extended-operand adds: (src2 << 48) >> 48 is a
// sign-extended halfword in a long, so fold to add with sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 32) >> 32 is a sign-extended word: add with sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 56) >> 56 is a sign-extended byte: add with sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 56) >>> 56 is a zero-extended byte: add with uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11883 
11884 
// Masking with 0xff zero-extends a byte, so AddI src1 (AndI src2 0xff)
// folds to addw with a uxtb extended operand.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Masking with 0xffff zero-extends a halfword: addw with uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long forms: AndL with 0xff / 0xffff / 0xffffffff folds to add with a
// uxtb / uxth / uxtw extended operand respectively.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11949 
// Subtract counterparts of the AddExt*_and rules above: AndI/AndL with
// 0xff / 0xffff / 0xffffffff is a zero-extension folded into the
// sub(w)'s extended-register operand.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12014 
12015 // END This section of the file is automatically generated. Do not edit --------------
12016 
12017 // ============================================================================
12018 // Floating Point Arithmetic Instructions
12019 
// Single-precision FP add: fadds.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP add: faddd.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision FP subtract: fsubs.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP subtract: fsubd.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision FP multiply: fmuls.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP multiply: fmuld.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12109 
// We cannot use these fused mul-with-add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12115 
12116 
12117 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12118 //   match(Set dst (AddF (MulF src1 src2) src3));
12119 
12120 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12121 
12122 //   ins_encode %{
12123 //     __ fmadds(as_FloatRegister($dst$$reg),
12124 //              as_FloatRegister($src1$$reg),
12125 //              as_FloatRegister($src2$$reg),
12126 //              as_FloatRegister($src3$$reg));
12127 //   %}
12128 
12129 //   ins_pipe(pipe_class_default);
12130 // %}
12131 
12132 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12133 //   match(Set dst (AddD (MulD src1 src2) src3));
12134 
12135 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12136 
12137 //   ins_encode %{
12138 //     __ fmaddd(as_FloatRegister($dst$$reg),
12139 //              as_FloatRegister($src1$$reg),
12140 //              as_FloatRegister($src2$$reg),
12141 //              as_FloatRegister($src3$$reg));
12142 //   %}
12143 
12144 //   ins_pipe(pipe_class_default);
12145 // %}
12146 
12147 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12148 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12149 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12150 
12151 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12152 
12153 //   ins_encode %{
12154 //     __ fmsubs(as_FloatRegister($dst$$reg),
12155 //               as_FloatRegister($src1$$reg),
12156 //               as_FloatRegister($src2$$reg),
12157 //              as_FloatRegister($src3$$reg));
12158 //   %}
12159 
12160 //   ins_pipe(pipe_class_default);
12161 // %}
12162 
12163 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12164 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12165 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12166 
12167 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12168 
12169 //   ins_encode %{
12170 //     __ fmsubd(as_FloatRegister($dst$$reg),
12171 //               as_FloatRegister($src1$$reg),
12172 //               as_FloatRegister($src2$$reg),
12173 //               as_FloatRegister($src3$$reg));
12174 //   %}
12175 
12176 //   ins_pipe(pipe_class_default);
12177 // %}
12178 
12179 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12180 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12181 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12182 
12183 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12184 
12185 //   ins_encode %{
12186 //     __ fnmadds(as_FloatRegister($dst$$reg),
12187 //                as_FloatRegister($src1$$reg),
12188 //                as_FloatRegister($src2$$reg),
12189 //                as_FloatRegister($src3$$reg));
12190 //   %}
12191 
12192 //   ins_pipe(pipe_class_default);
12193 // %}
12194 
12195 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12196 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12197 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12198 
12199 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12200 
12201 //   ins_encode %{
12202 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12203 //                as_FloatRegister($src1$$reg),
12204 //                as_FloatRegister($src2$$reg),
12205 //                as_FloatRegister($src3$$reg));
12206 //   %}
12207 
12208 //   ins_pipe(pipe_class_default);
12209 // %}
12210 
12211 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12212 //   match(Set dst (SubF (MulF src1 src2) src3));
12213 
12214 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12215 
12216 //   ins_encode %{
12217 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12218 //                as_FloatRegister($src1$$reg),
12219 //                as_FloatRegister($src2$$reg),
12220 //                as_FloatRegister($src3$$reg));
12221 //   %}
12222 
12223 //   ins_pipe(pipe_class_default);
12224 // %}
12225 
12226 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12227 //   match(Set dst (SubD (MulD src1 src2) src3));
12228 
12229 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12230 
12231 //   ins_encode %{
12232 //   // n.b. insn name should be fnmsubd
12233 //     __ fnmsub(as_FloatRegister($dst$$reg),
12234 //                as_FloatRegister($src1$$reg),
12235 //                as_FloatRegister($src2$$reg),
12236 //                as_FloatRegister($src3$$reg));
12237 //   %}
12238 
12239 //   ins_pipe(pipe_class_default);
12240 // %}
12241 
12242 
// Single-precision FP divide: fdivs (high latency, reflected in cost).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP divide: fdivd (even higher latency than fdivs).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12272 
// Single-precision FP negate: fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // The encoding emits fnegs; the format previously printed "fneg",
  // inconsistent with the "fnegd" printed by negD_reg_reg below.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12286 
// Double-precision FP negate: fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12300 
// Single-precision FP absolute value: fabss.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP absolute value: fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision square root: fsqrtd.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float sqrt appears in the ideal graph as a widen/sqrt/narrow chain
// (ConvD2F (SqrtD (ConvF2D src))); this collapses it to one fsqrts.
// NOTE(review): this relies on single-precision sqrt of a float being
// equal to the round-trip through double — presumably safe because
// double carries more than twice float's precision; confirm against
// the shared-code assumption for this pattern.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12352 
12353 // ============================================================================
12354 // Logical Instructions
12355 
12356 // Integer Logical Instructions
12357 
12358 // And Instructions
12359 
12360 
// dst = src1 & src2 (int), via non-flag-setting andw.
// NOTE(review): cr appears in the operand list but there is no effect()
// clause and the encoding does not touch flags — confirm whether the
// cr operand is actually needed here.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12375 
// dst = src1 & imm (int), where imm is a valid logical immediate.
// The encoding emits the non-flag-setting andw, so the format text says
// "andw" (it previously said "andsw", which is the flag-setting variant
// and did not match the emitted instruction).
// NOTE(review): cr appears in the operand list but there is no effect()
// clause — confirm whether the cr operand is actually needed here.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12390 
// Or Instructions

// dst = src1 | src2 (int).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | imm (int), imm a valid logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2 (int).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ imm (int), imm a valid logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12454 
12455 // Long Logical Instructions
12456 // TODO
12457 
// 64-bit (long) logical operations. The six format strings below
// previously annotated the disassembly with "# int"; they operate on
// long values, so the annotation now reads "# long" for consistency
// with the int variants above.

// dst = src1 & src2 (long).
// NOTE(review): cr appears in the operand list with no effect() clause
// and the encoding does not set flags — confirm whether it is needed.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & imm (long), imm a valid logical immediate.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// dst = src1 | src2 (long).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | imm (long), imm a valid logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// dst = src1 ^ src2 (long).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 ^ imm (long), imm a valid logical immediate.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12551 
// Signed i2l: sign-extend the low 32 bits (sbfm 0,31 == sxtw).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned i2l: ((long)src & 0xFFFFFFFFL) collapses to one zero-extend (ubfm).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// l2i: a 32-bit register move keeps the low word (and, per AArch64
// semantics for 32-bit writes, zeroes the upper half of the register).
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
12590 
// Conv2B for int: dst = (src != 0) ? 1 : 0, via compare-with-zero + cset.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Conv2B for pointer: dst = (src != NULL) ? 1 : 0, 64-bit compare + cset.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12626 
// Floating-point and integer conversion instructions. The fcvtzs* forms
// convert float/double to int/long with round-toward-zero; the scvtf*
// forms convert signed int/long to float/double.

// d2f: narrow double to float.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// f2d: widen float to double.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// f2i: float to 32-bit signed int, round toward zero.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// f2l: float to 64-bit signed long, round toward zero.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// i2f: signed 32-bit int to float.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// l2f: signed 64-bit long to float.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// d2i: double to 32-bit signed int, round toward zero.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// d2l: double to 64-bit signed long, round toward zero.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// i2d: signed 32-bit int to double.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// l2d: signed 64-bit long to double.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12756 
// stack <-> reg and reg <-> reg shuffles with no conversion
// These reinterpret the raw bits (Float.floatToRawIntBits and friends);
// the stack variants load the spilled value into a register of the
// other register class.

// Reinterpret a spilled float as an int: plain 32-bit load from sp+disp.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a spilled int as a float: 32-bit FP load from sp+disp.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a spilled double as a long: plain 64-bit load from sp+disp.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a spilled long as a double: 64-bit FP load from sp+disp.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12830 
// Reinterpret a float register as an int stack slot: 32-bit FP store.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret an int register as a float stack slot: 32-bit store.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12866 
// Reinterpret a double register as a long stack slot: 64-bit FP store.
// The format text previously printed "strd $dst, $src", which reversed
// the operands relative to the emitted store (src is stored into the
// dst slot) and was inconsistent with the sibling MoveF2I_reg_stack /
// MoveL2D_reg_stack formats; corrected to "strd $src, $dst".
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12884 
// Reinterpret a long register as a double stack slot: 64-bit store.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12902 
// Direct register-to-register bit moves between FP and integer register
// files, using fmov (no memory round trip).
// NOTE(review): these use ins_pipe(pipe_class_memory) although no memory
// is touched — presumably to model fmov cross-file latency; confirm.

// Bits of a float register into an int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bits of an int register into a float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}

// Bits of a double register into a long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bits of a long register into a double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
12974 
12975 // ============================================================================
12976 // clearing of an array
12977 
12978 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
12979 %{
12980   match(Set dummy (ClearArray cnt base));
12981   effect(USE_KILL cnt, USE_KILL base);
12982 
12983   ins_cost(4 * INSN_COST);
12984   format %{ "ClearArray $cnt, $base" %}
12985 
12986   ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));
12987 
12988   ins_pipe(pipe_class_memory);
12989 %}
12990 
12991 // ============================================================================
12992 // Overflow Math Instructions
12993 
12994 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
12995 %{
12996   match(Set cr (OverflowAddI op1 op2));
12997 
12998   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12999   ins_cost(INSN_COST);
13000   ins_encode %{
13001     __ cmnw($op1$$Register, $op2$$Register);
13002   %}
13003 
13004   ins_pipe(icmp_reg_reg);
13005 %}
13006 
13007 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13008 %{
13009   match(Set cr (OverflowAddI op1 op2));
13010 
13011   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13012   ins_cost(INSN_COST);
13013   ins_encode %{
13014     __ cmnw($op1$$Register, $op2$$constant);
13015   %}
13016 
13017   ins_pipe(icmp_reg_imm);
13018 %}
13019 
13020 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13021 %{
13022   match(Set cr (OverflowAddL op1 op2));
13023 
13024   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13025   ins_cost(INSN_COST);
13026   ins_encode %{
13027     __ cmn($op1$$Register, $op2$$Register);
13028   %}
13029 
13030   ins_pipe(icmp_reg_reg);
13031 %}
13032 
13033 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13034 %{
13035   match(Set cr (OverflowAddL op1 op2));
13036 
13037   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13038   ins_cost(INSN_COST);
13039   ins_encode %{
13040     __ cmn($op1$$Register, $op2$$constant);
13041   %}
13042 
13043   ins_pipe(icmp_reg_imm);
13044 %}
13045 
13046 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13047 %{
13048   match(Set cr (OverflowSubI op1 op2));
13049 
13050   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13051   ins_cost(INSN_COST);
13052   ins_encode %{
13053     __ cmpw($op1$$Register, $op2$$Register);
13054   %}
13055 
13056   ins_pipe(icmp_reg_reg);
13057 %}
13058 
13059 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13060 %{
13061   match(Set cr (OverflowSubI op1 op2));
13062 
13063   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13064   ins_cost(INSN_COST);
13065   ins_encode %{
13066     __ cmpw($op1$$Register, $op2$$constant);
13067   %}
13068 
13069   ins_pipe(icmp_reg_imm);
13070 %}
13071 
13072 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13073 %{
13074   match(Set cr (OverflowSubL op1 op2));
13075 
13076   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13077   ins_cost(INSN_COST);
13078   ins_encode %{
13079     __ cmp($op1$$Register, $op2$$Register);
13080   %}
13081 
13082   ins_pipe(icmp_reg_reg);
13083 %}
13084 
13085 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13086 %{
13087   match(Set cr (OverflowSubL op1 op2));
13088 
13089   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13090   ins_cost(INSN_COST);
13091   ins_encode %{
13092     __ cmp($op1$$Register, $op2$$constant);
13093   %}
13094 
13095   ins_pipe(icmp_reg_imm);
13096 %}
13097 
// Negation overflow = subtraction from zero: flags from 0 - op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negation overflow: flags from 0 - op1 (64-bit compare).
// NOTE(review): the zero operand is declared immI0 (int 0) even though
// this is the long variant — confirm that is intentional.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13123 
// Int multiply overflow: do the multiply in 64 bits (smull), then the
// product overflowed 32 bits iff it is not its own sign extension. The
// tail sequence converts that NE/EQ result into the V flag (VS/VC) that
// the generic overflow framework expects.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form when the overflow check directly feeds a branch: skip the
// flag-conversion tail and branch straight on the NE/EQ result.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow: low 64 bits via mul, high 64 via smulh; the
// product fits in 64 bits iff the high half equals the sign extension
// of the low half. Tail converts NE/EQ into VS/VC as above.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused branch form of the long multiply overflow check.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13213 
13214 // ============================================================================
13215 // Compare Instructions
13216 
13217 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
13218 %{
13219   match(Set cr (CmpI op1 op2));
13220 
13221   effect(DEF cr, USE op1, USE op2);
13222 
13223   ins_cost(INSN_COST);
13224   format %{ "cmpw  $op1, $op2" %}
13225 
13226   ins_encode(aarch64_enc_cmpw(op1, op2));
13227 
13228   ins_pipe(icmp_reg_reg);
13229 %}
13230 
13231 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
13232 %{
13233   match(Set cr (CmpI op1 zero));
13234 
13235   effect(DEF cr, USE op1);
13236 
13237   ins_cost(INSN_COST);
13238   format %{ "cmpw $op1, 0" %}
13239 
13240   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13241 
13242   ins_pipe(icmp_reg_imm);
13243 %}
13244 
13245 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
13246 %{
13247   match(Set cr (CmpI op1 op2));
13248 
13249   effect(DEF cr, USE op1);
13250 
13251   ins_cost(INSN_COST);
13252   format %{ "cmpw  $op1, $op2" %}
13253 
13254   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13255 
13256   ins_pipe(icmp_reg_imm);
13257 %}
13258 
13259 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
13260 %{
13261   match(Set cr (CmpI op1 op2));
13262 
13263   effect(DEF cr, USE op1);
13264 
13265   ins_cost(INSN_COST * 2);
13266   format %{ "cmpw  $op1, $op2" %}
13267 
13268   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13269 
13270   ins_pipe(icmp_reg_imm);
13271 %}
13272 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

// Unsigned int compare, register-register.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13332 
// Long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Long compare against zero.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Long compare against an arbitrary immediate (may need a scratch move).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13388 
// Pointer compare (unsigned flags).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed (narrow) pointer compare.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed pointer null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13444 
13445 // FP comparisons
13446 //
13447 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13448 // using normal cmpOp. See declaration of rFlagsReg for details.
13449 
13450 instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
13451 %{
13452   match(Set cr (CmpF src1 src2));
13453 
13454   ins_cost(3 * INSN_COST);
13455   format %{ "fcmps $src1, $src2" %}
13456 
13457   ins_encode %{
13458     __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
13459   %}
13460 
13461   ins_pipe(pipe_class_compare);
13462 %}
13463 
13464 instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
13465 %{
13466   match(Set cr (CmpF src1 src2));
13467 
13468   ins_cost(3 * INSN_COST);
13469   format %{ "fcmps $src1, 0.0" %}
13470 
13471   ins_encode %{
13472     __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
13473   %}
13474 
13475   ins_pipe(pipe_class_compare);
13476 %}
13477 // FROM HERE
13478 
// Compare two double-precision FP registers with fcmpd, setting the
// normal (signed) flags register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13492 
// Compare a double-precision FP register against the constant 0.0
// (immD0) using the fcmpd-with-zero form, setting the normal flags.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Plain 0.0: the 'D' suffix previously used here is a non-standard
    // extension that some compilers (e.g. clang) reject.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13506 
// Three-way float compare (CmpF3): produces -1 / 0 / +1 in $dst, with
// an unordered comparison treated as less-than.  Kills the flags.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parenthesis in the csinvw line of the format.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // The previously declared-and-bound 'done' label was never branched
    // to, so it has been removed (binding a label emits no code).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13534 
// Three-way double compare (CmpD3): produces -1 / 0 / +1 in $dst, with
// an unordered comparison treated as less-than.  Kills the flags.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parenthesis in the csinvw line of the format.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // The previously declared-and-bound 'done' label was never branched
    // to, so it has been removed (binding a label emits no code).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13561 
// Three-way float compare against the constant 0.0: produces -1 / 0 / +1
// in $dst, unordered treated as less-than.  Kills the flags.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parenthesis in the csinvw line of the format.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Dead 'done' label removed; plain 0.0 replaces the non-standard
    // 0.0D literal (rejected by some compilers, e.g. clang).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13588 
// Three-way double compare against the constant 0.0: produces -1 / 0 / +1
// in $dst, unordered treated as less-than.  Kills the flags.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parenthesis in the csinvw line of the format.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    // Dead 'done' label removed; plain 0.0 replaces the non-standard
    // 0.0D literal (rejected by some compilers, e.g. clang).
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13614 
// CmpLTMask: $dst = (p < q) ? -1 : 0.  csetw materializes 1 on LT, then
// subw from zr negates it to -1; otherwise both steps leave 0.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: a single arithmetic shift right by 31
// broadcasts the sign bit, yielding -1 when src < 0 and 0 otherwise.
// Note: does not actually modify the flags despite the KILL cr effect.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13651 
13652 // ============================================================================
13653 // Max and Min
13654 
// Signed integer minimum: compare then conditionally select src1 when
// src1 < src2 (LT), else src2.  Two instructions, kills the flags.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13679 // FROM HERE
13680 
// Signed integer maximum: identical shape to minI_rReg but selects src1
// on GT instead of LT.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13705 
13706 // ============================================================================
13707 // Branch Instructions
13708 
13709 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch -- branch on signed condition codes.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned -- same as branchCon but uses the
// unsigned condition-code operand/flags (cmpOpU / rFlagsRegU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13765 
13766 // Make use of CBZ and CBNZ.  These instructions, as well as being
13767 // shorter than (cmp; branch), have the additional benefit of not
13768 // killing the flags.
13769 
// Compare-int-with-zero and branch: the predicate restricts matching to
// eq/ne tests so CBZW/CBNZW can be used (no flags clobbered).
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-long-with-zero and branch (eq/ne only) using CBZ/CBNZ.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compare-pointer-with-null and branch (eq/ne only) using CBZ/CBNZ.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null test of a narrow oop before it is decoded: a compressed oop is
// null iff its 32-bit encoding is zero, so the DecodeN can be elided
// and CBZW/CBNZW applied directly to the narrow register.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13845 
13846 // Test bit and Branch
13847 
// Sign test of a long against zero: lt/ge against 0 depends only on the
// sign bit (bit 63), so TBNZ/TBZ on that bit replaces a cmp+branch.
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::LT)
      __ tbnz($op1$$Register, 63, *L);
    else
      __ tbz($op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Sign test of an int against zero: same idea, sign bit is bit 31.
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::LT)
      __ tbnz($op1$$Register, 31, *L);
    else
      __ tbz($op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Single-bit test and branch on a long: (op1 & op2) ==/!= 0 where op2
// is a power of two (enforced by the predicate), so TBZ/TBNZ on the
// corresponding bit index (exact_log2 of the mask) suffices.
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    if (cond == Assembler::EQ)
      __ tbz($op1$$Register, bit, *L);
    else
      __ tbnz($op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Single-bit test and branch on an int; same scheme as cmpL_branch_bit.
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    if (cond == Assembler::EQ)
      __ tbz($op1$$Register, bit, *L);
    else
      __ tbnz($op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13927 
13928 // Test bits
13929 
// Flags-setting bit test: (op1 & imm) compared against 0 collapses to a
// single TST.  The predicate checks the immediate is encodable as an
// AArch64 logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit variant of cmpL_and, using TSTW with a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register form of the long bit test (no immediate restriction).
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register form of the int bit test.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
13977 
13978 
13979 // Conditional Far Branch
13980 // Conditional Far Branch Unsigned
13981 // TODO: fixme
13982 
13983 // counted loop end branch near
// Counted-loop back-branch on signed condition codes; same encoding as
// branchCon but matched against CountedLoopEnd.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// Unsigned variant of branchLoopEnd (cmpOpU / rFlagsRegU).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14016 
14017 // counted loop end branch far
14018 // counted loop end branch far unsigned
14019 // TODO: fixme
14020 
14021 // ============================================================================
14022 // inlined locking and unlocking
14023 
// Inlined fast-path monitor enter.  The flags result tells the caller
// whether the slow path is needed; tmp/tmp2 are scratch (TEMP).
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined fast-path monitor exit; mirror of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14051 
14052 
14053 // ============================================================================
14054 // Safepoint Instructions
14055 
14056 // TODO
14057 // provide a near and far version of this code
14058 
// Safepoint poll: load from the polling page; the page is protected by
// the VM to trap the thread when a safepoint is pending.  The load is
// tagged with relocInfo::poll_type so the signal handler can identify it.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14071 
14072 
14073 // ============================================================================
14074 // Procedure Call/Return Instructions
14075 
14076 // Call Java Static Instruction
14077 
// Direct call to a statically-bound Java method, followed by the call
// epilogue encoding.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14093 
14094 // TO HERE
14095 
14096 // Call Java Dynamic Instruction
// Dynamically-dispatched (virtual/interface) Java call, followed by the
// call epilogue encoding.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}

// Call into the VM runtime (full runtime call convention).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Leaf runtime call (no safepoint/oop-map machinery expected by the
// callee); shares the java-to-runtime encoding.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Leaf runtime call that does not use FP registers; same encoding as
// CallLeafDirect on this port.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14163 
14164 // Tail Call; Jump from runtime stub to Java code.
14165 // Also known as an 'interprocedural jump'.
14166 // Target of jump will eventually return to caller.
14167 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code; the method
// oop travels in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Tail jump used for exception forwarding: the exception oop is pinned
// to r0 (iRegP_R0) and the return address is removed by the encoding.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14193 
14194 // Create exception oop: created by stack-crawling runtime code.
14195 // Created exception is now available to this handler, and is setup
14196 // just prior to jumping to this handler. No code emitted.
14197 // TODO check
14198 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Marker node: the exception oop is already in r0 when this handler is
// entered, so no code is emitted (size 0).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}


// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now: Halt is lowered to a BRK with an arbitrary immediate (999)
// so execution traps immediately if this point is ever reached.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14254 
14255 // ============================================================================
14256 // Partial Subtype Check
14257 //
14258 // superklass array for an instance of the superklass.  Set a hidden
14259 // internal cache on a hit (cache is checked with exposed code in
14260 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14261 // encoding ALSO sets flags.
14262 
// Partial subtype check with fixed register bindings (r4/r0/r2/r5);
// opcode(0x1) asks the shared encoding to zero the result register on a
// hit.  See the section comment above for result semantics.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}

// Variant matched when only the flags comparison against null is
// consumed: opcode(0x0) skips zeroing the result register.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14292 
// String.compareTo intrinsic; only matched when CompactStrings is off
// (char[]-backed strings).  All operands use fixed registers expected
// by MacroAssembler::string_compare.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.indexOf intrinsic with a variable-length needle; -1 passed as
// the constant-count argument tells the stub the count is in a register.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.indexOf intrinsic specialized for a short constant-length
// needle (immI_le_4, i.e. at most 4 chars); the count is passed to the
// stub as a compile-time constant and zr stands in for the register.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.equals intrinsic (shared-length comparison of two strings).
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Arrays.equals intrinsic for char arrays (UU encoding only, per the
// predicate); lengths are read by the stub from the array headers.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// encode char[] to byte[] in ISO_8859_1
// Vectorized encoder; V0-V3 are clobbered as SIMD scratch.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
14398 
14399 // ============================================================================
14400 // This name is KNOWN by the ADLC and cannot be changed.
14401 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14402 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated thread
// register (thread_RegP), so this emits no code (size 0, cost 0).
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14417 
14418 // ====================VECTOR INSTRUCTIONS=====================================
14419 
14420 // Load vector (32 bits)
// Load vector (32 bits) -- selected by the LoadVector's memory size.
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Vector (128 bits) -- full quad register (vecX).
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (32 bits) -- selected by the StoreVector's memory size.
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (128 bits) -- full quad register (vecX).
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(pipe_class_memory);
%}
14485 
// Replicate a scalar byte into all lanes of a vector.

// 8B form also covers 4-byte vectors: a length-4 ReplicateB still uses a
// D-register with DUP.T8B; the unused upper lanes are simply ignored.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate forms: the constant is masked to the byte range before being
// fed to the vector move-immediate.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}
14535 
// Replicate a scalar short into all lanes of a vector.

// 4S form also covers length-2 short vectors (same D-register, extra
// lanes ignored), mirroring replicate8B above.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate forms: constant masked to the 16-bit halfword range.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}
14585 
// Replicate a scalar int into all lanes of a vector.

instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate forms: no masking needed — the constant is already a
// 32-bit int, which matches the S lane width exactly.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
14633 
// Replicate a scalar long into both 64-bit lanes of a Q-register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Zero a 2L vector.
// NOTE(review): this matches ReplicateI (not ReplicateL) with an immI0 —
// presumably the ideal graph represents a replicated long zero as an int
// replicate; confirm against the matcher before changing.  The encoding
// emits EOR dst,dst,dst (not the MOVI shown in the format string), which
// zeroes all 128 bits regardless of element type, so no input value of
// dst is actually read.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14659 
// Replicate a scalar float/double into all lanes of a vector.
// The source is already in a SIMD register, so DUP takes the element
// form (register, lane) rather than a general-purpose register.

instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14698 
14699 // ====================REDUCTION ARITHMETIC====================================
14700 
14701 instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
14702 %{
14703   match(Set dst (AddReductionVI src1 src2));
14704   ins_cost(INSN_COST);
14705   effect(TEMP tmp, TEMP tmp2);
14706   format %{ "umov  $tmp, $src2, S, 0\n\t"
14707             "umov  $tmp2, $src2, S, 1\n\t"
14708             "addw  $dst, $src1, $tmp\n\t"
14709             "addw  $dst, $dst, $tmp2\t add reduction2i"
14710   %}
14711   ins_encode %{
14712     __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
14713     __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
14714     __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
14715     __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
14716   %}
14717   ins_pipe(pipe_class_default);
14718 %}
14719 
14720 instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
14721 %{
14722   match(Set dst (AddReductionVI src1 src2));
14723   ins_cost(INSN_COST);
14724   effect(TEMP tmp, TEMP tmp2);
14725   format %{ "addv  $tmp, T4S, $src2\n\t"
14726             "umov  $tmp2, $tmp, S, 0\n\t"
14727             "addw  $dst, $tmp2, $src1\t add reduction4i"
14728   %}
14729   ins_encode %{
14730     __ addv(as_FloatRegister($tmp$$reg), __ T4S,
14731             as_FloatRegister($src2$$reg));
14732     __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
14733     __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
14734   %}
14735   ins_pipe(pipe_class_default);
14736 %}
14737 
// Integer multiply-reduction: dst = src1 * product of all lanes of src2.

// 2-lane form: extract each lane with UMOV and multiply in the scalar
// unit.  TEMP dst keeps dst from being allocated over a live source.
// NOTE(review): the last format line carries a stray trailing "\n\t".
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// 4-lane form: INS copies the high 64 bits of src2 down into tmp, a T2S
// MULV then multiplies lanes pairwise (0*2, 1*3), and the two partial
// products are extracted and combined with scalar multiplies.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14781 
// Float add-reduction: dst = src1 + sum of all lanes of src2.
// Scalar FADDS is used lane by lane (not FADDP/ADDV) so the additions
// happen in strict lane order, preserving Java's required floating-point
// evaluation order.  INS moves lane i of src2 down to lane 0 of tmp.

instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14833 
// Float multiply-reduction (2 lanes): dst = src1 * src2[0] * src2[1].
// Scalar FMULS is used lane by lane so the multiplications happen in
// strict lane order; INS moves lane 1 of src2 down to lane 0 of tmp.
// (Fix: the format tag previously read "add reduction4f" — a copy-paste
// from reduce_add4F — which mislabelled this rule in PrintOptoAssembly
// output; it is a 2-lane multiply reduction.)
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14853 
// Float multiply-reduction (4 lanes):
//   dst = src1 * src2[0] * src2[1] * src2[2] * src2[3].
// Scalar FMULS per lane keeps strict lane-order evaluation; each INS
// moves the next lane of src2 down to lane 0 of tmp.
// (Fix: the format tag previously read "add reduction4f" — copied from
// reduce_add4F — which mislabelled this multiply reduction in
// PrintOptoAssembly output.)
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14885 
// Double add-reduction (2 lanes): dst = src1 + src2[0] + src2[1].
// Scalar FADDD per lane preserves strict lane-order evaluation; INS
// moves lane 1 of src2 down to lane 0 of tmp for the second add.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14905 
// Double multiply-reduction (2 lanes): dst = src1 * src2[0] * src2[1].
// Scalar FMULD per lane keeps strict lane-order evaluation; INS moves
// lane 1 of src2 down to lane 0 of tmp for the second multiply.
// (Fix: the format tag previously read "add reduction2d" — copied from
// reduce_add2D — which mislabelled this multiply reduction in
// PrintOptoAssembly output.)
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14925 
14926 // ====================VECTOR ARITHMETIC=======================================
14927 
14928 // --------------------------------- ADD --------------------------------------
14929 
14930 instruct vadd8B(vecD dst, vecD src1, vecD src2)
14931 %{
14932   predicate(n->as_Vector()->length() == 4 ||
14933             n->as_Vector()->length() == 8);
14934   match(Set dst (AddVB src1 src2));
14935   ins_cost(INSN_COST);
14936   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
14937   ins_encode %{
14938     __ addv(as_FloatRegister($dst$$reg), __ T8B,
14939             as_FloatRegister($src1$$reg),
14940             as_FloatRegister($src2$$reg));
14941   %}
14942   ins_pipe(pipe_class_default);
14943 %}
14944 
14945 instruct vadd16B(vecX dst, vecX src1, vecX src2)
14946 %{
14947   predicate(n->as_Vector()->length() == 16);
14948   match(Set dst (AddVB src1 src2));
14949   ins_cost(INSN_COST);
14950   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
14951   ins_encode %{
14952     __ addv(as_FloatRegister($dst$$reg), __ T16B,
14953             as_FloatRegister($src1$$reg),
14954             as_FloatRegister($src2$$reg));
14955   %}
14956   ins_pipe(pipe_class_default);
14957 %}
14958 
14959 instruct vadd4S(vecD dst, vecD src1, vecD src2)
14960 %{
14961   predicate(n->as_Vector()->length() == 2 ||
14962             n->as_Vector()->length() == 4);
14963   match(Set dst (AddVS src1 src2));
14964   ins_cost(INSN_COST);
14965   format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
14966   ins_encode %{
14967     __ addv(as_FloatRegister($dst$$reg), __ T4H,
14968             as_FloatRegister($src1$$reg),
14969             as_FloatRegister($src2$$reg));
14970   %}
14971   ins_pipe(pipe_class_default);
14972 %}
14973 
14974 instruct vadd8S(vecX dst, vecX src1, vecX src2)
14975 %{
14976   predicate(n->as_Vector()->length() == 8);
14977   match(Set dst (AddVS src1 src2));
14978   ins_cost(INSN_COST);
14979   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
14980   ins_encode %{
14981     __ addv(as_FloatRegister($dst$$reg), __ T8H,
14982             as_FloatRegister($src1$$reg),
14983             as_FloatRegister($src2$$reg));
14984   %}
14985   ins_pipe(pipe_class_default);
14986 %}
14987 
14988 instruct vadd2I(vecD dst, vecD src1, vecD src2)
14989 %{
14990   predicate(n->as_Vector()->length() == 2);
14991   match(Set dst (AddVI src1 src2));
14992   ins_cost(INSN_COST);
14993   format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
14994   ins_encode %{
14995     __ addv(as_FloatRegister($dst$$reg), __ T2S,
14996             as_FloatRegister($src1$$reg),
14997             as_FloatRegister($src2$$reg));
14998   %}
14999   ins_pipe(pipe_class_default);
15000 %}
15001 
15002 instruct vadd4I(vecX dst, vecX src1, vecX src2)
15003 %{
15004   predicate(n->as_Vector()->length() == 4);
15005   match(Set dst (AddVI src1 src2));
15006   ins_cost(INSN_COST);
15007   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
15008   ins_encode %{
15009     __ addv(as_FloatRegister($dst$$reg), __ T4S,
15010             as_FloatRegister($src1$$reg),
15011             as_FloatRegister($src2$$reg));
15012   %}
15013   ins_pipe(pipe_class_default);
15014 %}
15015 
15016 instruct vadd2L(vecX dst, vecX src1, vecX src2)
15017 %{
15018   predicate(n->as_Vector()->length() == 2);
15019   match(Set dst (AddVL src1 src2));
15020   ins_cost(INSN_COST);
15021   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
15022   ins_encode %{
15023     __ addv(as_FloatRegister($dst$$reg), __ T2D,
15024             as_FloatRegister($src1$$reg),
15025             as_FloatRegister($src2$$reg));
15026   %}
15027   ins_pipe(pipe_class_default);
15028 %}
15029 
15030 instruct vadd2F(vecD dst, vecD src1, vecD src2)
15031 %{
15032   predicate(n->as_Vector()->length() == 2);
15033   match(Set dst (AddVF src1 src2));
15034   ins_cost(INSN_COST);
15035   format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
15036   ins_encode %{
15037     __ fadd(as_FloatRegister($dst$$reg), __ T2S,
15038             as_FloatRegister($src1$$reg),
15039             as_FloatRegister($src2$$reg));
15040   %}
15041   ins_pipe(pipe_class_default);
15042 %}
15043 
15044 instruct vadd4F(vecX dst, vecX src1, vecX src2)
15045 %{
15046   predicate(n->as_Vector()->length() == 4);
15047   match(Set dst (AddVF src1 src2));
15048   ins_cost(INSN_COST);
15049   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
15050   ins_encode %{
15051     __ fadd(as_FloatRegister($dst$$reg), __ T4S,
15052             as_FloatRegister($src1$$reg),
15053             as_FloatRegister($src2$$reg));
15054   %}
15055   ins_pipe(pipe_class_default);
15056 %}
15057 
// Lane-wise addition of two vectors of two doubles (128 bits).
// (Fix: added the length == 2 predicate that every sibling 2D rule
// — vsub2D, vmul2D, vdiv2D — carries.  Harmless in effect, since two
// lanes is the only legal AddVD shape for a vecX, but it keeps the
// matcher rules consistent and future-proof.)
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15070 
15071 // --------------------------------- SUB --------------------------------------
15072 
15073 instruct vsub8B(vecD dst, vecD src1, vecD src2)
15074 %{
15075   predicate(n->as_Vector()->length() == 4 ||
15076             n->as_Vector()->length() == 8);
15077   match(Set dst (SubVB src1 src2));
15078   ins_cost(INSN_COST);
15079   format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
15080   ins_encode %{
15081     __ subv(as_FloatRegister($dst$$reg), __ T8B,
15082             as_FloatRegister($src1$$reg),
15083             as_FloatRegister($src2$$reg));
15084   %}
15085   ins_pipe(pipe_class_default);
15086 %}
15087 
15088 instruct vsub16B(vecX dst, vecX src1, vecX src2)
15089 %{
15090   predicate(n->as_Vector()->length() == 16);
15091   match(Set dst (SubVB src1 src2));
15092   ins_cost(INSN_COST);
15093   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
15094   ins_encode %{
15095     __ subv(as_FloatRegister($dst$$reg), __ T16B,
15096             as_FloatRegister($src1$$reg),
15097             as_FloatRegister($src2$$reg));
15098   %}
15099   ins_pipe(pipe_class_default);
15100 %}
15101 
15102 instruct vsub4S(vecD dst, vecD src1, vecD src2)
15103 %{
15104   predicate(n->as_Vector()->length() == 2 ||
15105             n->as_Vector()->length() == 4);
15106   match(Set dst (SubVS src1 src2));
15107   ins_cost(INSN_COST);
15108   format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
15109   ins_encode %{
15110     __ subv(as_FloatRegister($dst$$reg), __ T4H,
15111             as_FloatRegister($src1$$reg),
15112             as_FloatRegister($src2$$reg));
15113   %}
15114   ins_pipe(pipe_class_default);
15115 %}
15116 
15117 instruct vsub8S(vecX dst, vecX src1, vecX src2)
15118 %{
15119   predicate(n->as_Vector()->length() == 8);
15120   match(Set dst (SubVS src1 src2));
15121   ins_cost(INSN_COST);
15122   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
15123   ins_encode %{
15124     __ subv(as_FloatRegister($dst$$reg), __ T8H,
15125             as_FloatRegister($src1$$reg),
15126             as_FloatRegister($src2$$reg));
15127   %}
15128   ins_pipe(pipe_class_default);
15129 %}
15130 
15131 instruct vsub2I(vecD dst, vecD src1, vecD src2)
15132 %{
15133   predicate(n->as_Vector()->length() == 2);
15134   match(Set dst (SubVI src1 src2));
15135   ins_cost(INSN_COST);
15136   format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
15137   ins_encode %{
15138     __ subv(as_FloatRegister($dst$$reg), __ T2S,
15139             as_FloatRegister($src1$$reg),
15140             as_FloatRegister($src2$$reg));
15141   %}
15142   ins_pipe(pipe_class_default);
15143 %}
15144 
15145 instruct vsub4I(vecX dst, vecX src1, vecX src2)
15146 %{
15147   predicate(n->as_Vector()->length() == 4);
15148   match(Set dst (SubVI src1 src2));
15149   ins_cost(INSN_COST);
15150   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
15151   ins_encode %{
15152     __ subv(as_FloatRegister($dst$$reg), __ T4S,
15153             as_FloatRegister($src1$$reg),
15154             as_FloatRegister($src2$$reg));
15155   %}
15156   ins_pipe(pipe_class_default);
15157 %}
15158 
15159 instruct vsub2L(vecX dst, vecX src1, vecX src2)
15160 %{
15161   predicate(n->as_Vector()->length() == 2);
15162   match(Set dst (SubVL src1 src2));
15163   ins_cost(INSN_COST);
15164   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
15165   ins_encode %{
15166     __ subv(as_FloatRegister($dst$$reg), __ T2D,
15167             as_FloatRegister($src1$$reg),
15168             as_FloatRegister($src2$$reg));
15169   %}
15170   ins_pipe(pipe_class_default);
15171 %}
15172 
15173 instruct vsub2F(vecD dst, vecD src1, vecD src2)
15174 %{
15175   predicate(n->as_Vector()->length() == 2);
15176   match(Set dst (SubVF src1 src2));
15177   ins_cost(INSN_COST);
15178   format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
15179   ins_encode %{
15180     __ fsub(as_FloatRegister($dst$$reg), __ T2S,
15181             as_FloatRegister($src1$$reg),
15182             as_FloatRegister($src2$$reg));
15183   %}
15184   ins_pipe(pipe_class_default);
15185 %}
15186 
15187 instruct vsub4F(vecX dst, vecX src1, vecX src2)
15188 %{
15189   predicate(n->as_Vector()->length() == 4);
15190   match(Set dst (SubVF src1 src2));
15191   ins_cost(INSN_COST);
15192   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
15193   ins_encode %{
15194     __ fsub(as_FloatRegister($dst$$reg), __ T4S,
15195             as_FloatRegister($src1$$reg),
15196             as_FloatRegister($src2$$reg));
15197   %}
15198   ins_pipe(pipe_class_default);
15199 %}
15200 
15201 instruct vsub2D(vecX dst, vecX src1, vecX src2)
15202 %{
15203   predicate(n->as_Vector()->length() == 2);
15204   match(Set dst (SubVD src1 src2));
15205   ins_cost(INSN_COST);
15206   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
15207   ins_encode %{
15208     __ fsub(as_FloatRegister($dst$$reg), __ T2D,
15209             as_FloatRegister($src1$$reg),
15210             as_FloatRegister($src2$$reg));
15211   %}
15212   ins_pipe(pipe_class_default);
15213 %}
15214 
15215 // --------------------------------- MUL --------------------------------------
15216 
15217 instruct vmul4S(vecD dst, vecD src1, vecD src2)
15218 %{
15219   predicate(n->as_Vector()->length() == 2 ||
15220             n->as_Vector()->length() == 4);
15221   match(Set dst (MulVS src1 src2));
15222   ins_cost(INSN_COST);
15223   format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
15224   ins_encode %{
15225     __ mulv(as_FloatRegister($dst$$reg), __ T4H,
15226             as_FloatRegister($src1$$reg),
15227             as_FloatRegister($src2$$reg));
15228   %}
15229   ins_pipe(pipe_class_default);
15230 %}
15231 
15232 instruct vmul8S(vecX dst, vecX src1, vecX src2)
15233 %{
15234   predicate(n->as_Vector()->length() == 8);
15235   match(Set dst (MulVS src1 src2));
15236   ins_cost(INSN_COST);
15237   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
15238   ins_encode %{
15239     __ mulv(as_FloatRegister($dst$$reg), __ T8H,
15240             as_FloatRegister($src1$$reg),
15241             as_FloatRegister($src2$$reg));
15242   %}
15243   ins_pipe(pipe_class_default);
15244 %}
15245 
15246 instruct vmul2I(vecD dst, vecD src1, vecD src2)
15247 %{
15248   predicate(n->as_Vector()->length() == 2);
15249   match(Set dst (MulVI src1 src2));
15250   ins_cost(INSN_COST);
15251   format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
15252   ins_encode %{
15253     __ mulv(as_FloatRegister($dst$$reg), __ T2S,
15254             as_FloatRegister($src1$$reg),
15255             as_FloatRegister($src2$$reg));
15256   %}
15257   ins_pipe(pipe_class_default);
15258 %}
15259 
15260 instruct vmul4I(vecX dst, vecX src1, vecX src2)
15261 %{
15262   predicate(n->as_Vector()->length() == 4);
15263   match(Set dst (MulVI src1 src2));
15264   ins_cost(INSN_COST);
15265   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
15266   ins_encode %{
15267     __ mulv(as_FloatRegister($dst$$reg), __ T4S,
15268             as_FloatRegister($src1$$reg),
15269             as_FloatRegister($src2$$reg));
15270   %}
15271   ins_pipe(pipe_class_default);
15272 %}
15273 
15274 instruct vmul2F(vecD dst, vecD src1, vecD src2)
15275 %{
15276   predicate(n->as_Vector()->length() == 2);
15277   match(Set dst (MulVF src1 src2));
15278   ins_cost(INSN_COST);
15279   format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
15280   ins_encode %{
15281     __ fmul(as_FloatRegister($dst$$reg), __ T2S,
15282             as_FloatRegister($src1$$reg),
15283             as_FloatRegister($src2$$reg));
15284   %}
15285   ins_pipe(pipe_class_default);
15286 %}
15287 
15288 instruct vmul4F(vecX dst, vecX src1, vecX src2)
15289 %{
15290   predicate(n->as_Vector()->length() == 4);
15291   match(Set dst (MulVF src1 src2));
15292   ins_cost(INSN_COST);
15293   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
15294   ins_encode %{
15295     __ fmul(as_FloatRegister($dst$$reg), __ T4S,
15296             as_FloatRegister($src1$$reg),
15297             as_FloatRegister($src2$$reg));
15298   %}
15299   ins_pipe(pipe_class_default);
15300 %}
15301 
15302 instruct vmul2D(vecX dst, vecX src1, vecX src2)
15303 %{
15304   predicate(n->as_Vector()->length() == 2);
15305   match(Set dst (MulVD src1 src2));
15306   ins_cost(INSN_COST);
15307   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
15308   ins_encode %{
15309     __ fmul(as_FloatRegister($dst$$reg), __ T2D,
15310             as_FloatRegister($src1$$reg),
15311             as_FloatRegister($src2$$reg));
15312   %}
15313   ins_pipe(pipe_class_default);
15314 %}
15315 
15316 // --------------------------------- DIV --------------------------------------
15317 
15318 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
15319 %{
15320   predicate(n->as_Vector()->length() == 2);
15321   match(Set dst (DivVF src1 src2));
15322   ins_cost(INSN_COST);
15323   format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
15324   ins_encode %{
15325     __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
15326             as_FloatRegister($src1$$reg),
15327             as_FloatRegister($src2$$reg));
15328   %}
15329   ins_pipe(pipe_class_default);
15330 %}
15331 
15332 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
15333 %{
15334   predicate(n->as_Vector()->length() == 4);
15335   match(Set dst (DivVF src1 src2));
15336   ins_cost(INSN_COST);
15337   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
15338   ins_encode %{
15339     __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
15340             as_FloatRegister($src1$$reg),
15341             as_FloatRegister($src2$$reg));
15342   %}
15343   ins_pipe(pipe_class_default);
15344 %}
15345 
15346 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
15347 %{
15348   predicate(n->as_Vector()->length() == 2);
15349   match(Set dst (DivVD src1 src2));
15350   ins_cost(INSN_COST);
15351   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
15352   ins_encode %{
15353     __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
15354             as_FloatRegister($src1$$reg),
15355             as_FloatRegister($src2$$reg));
15356   %}
15357   ins_pipe(pipe_class_default);
15358 %}
15359 
15360 // --------------------------------- SQRT -------------------------------------
15361 
15362 instruct vsqrt2D(vecX dst, vecX src)
15363 %{
15364   predicate(n->as_Vector()->length() == 2);
15365   match(Set dst (SqrtVD src));
15366   format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
15367   ins_encode %{
15368     __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
15369              as_FloatRegister($src$$reg));
15370   %}
15371   ins_pipe(pipe_class_default);
15372 %}
15373 
15374 // --------------------------------- ABS --------------------------------------
15375 
15376 instruct vabs2F(vecD dst, vecD src)
15377 %{
15378   predicate(n->as_Vector()->length() == 2);
15379   match(Set dst (AbsVF src));
15380   ins_cost(INSN_COST * 3);
15381   format %{ "fabs  $dst,$src\t# vector (2S)" %}
15382   ins_encode %{
15383     __ fabs(as_FloatRegister($dst$$reg), __ T2S,
15384             as_FloatRegister($src$$reg));
15385   %}
15386   ins_pipe(pipe_class_default);
15387 %}
15388 
15389 instruct vabs4F(vecX dst, vecX src)
15390 %{
15391   predicate(n->as_Vector()->length() == 4);
15392   match(Set dst (AbsVF src));
15393   ins_cost(INSN_COST * 3);
15394   format %{ "fabs  $dst,$src\t# vector (4S)" %}
15395   ins_encode %{
15396     __ fabs(as_FloatRegister($dst$$reg), __ T4S,
15397             as_FloatRegister($src$$reg));
15398   %}
15399   ins_pipe(pipe_class_default);
15400 %}
15401 
15402 instruct vabs2D(vecX dst, vecX src)
15403 %{
15404   predicate(n->as_Vector()->length() == 2);
15405   match(Set dst (AbsVD src));
15406   ins_cost(INSN_COST * 3);
15407   format %{ "fabs  $dst,$src\t# vector (2D)" %}
15408   ins_encode %{
15409     __ fabs(as_FloatRegister($dst$$reg), __ T2D,
15410             as_FloatRegister($src$$reg));
15411   %}
15412   ins_pipe(pipe_class_default);
15413 %}
15414 
15415 // --------------------------------- NEG --------------------------------------
15416 
15417 instruct vneg2F(vecD dst, vecD src)
15418 %{
15419   predicate(n->as_Vector()->length() == 2);
15420   match(Set dst (NegVF src));
15421   ins_cost(INSN_COST * 3);
15422   format %{ "fneg  $dst,$src\t# vector (2S)" %}
15423   ins_encode %{
15424     __ fneg(as_FloatRegister($dst$$reg), __ T2S,
15425             as_FloatRegister($src$$reg));
15426   %}
15427   ins_pipe(pipe_class_default);
15428 %}
15429 
15430 instruct vneg4F(vecX dst, vecX src)
15431 %{
15432   predicate(n->as_Vector()->length() == 4);
15433   match(Set dst (NegVF src));
15434   ins_cost(INSN_COST * 3);
15435   format %{ "fneg  $dst,$src\t# vector (4S)" %}
15436   ins_encode %{
15437     __ fneg(as_FloatRegister($dst$$reg), __ T4S,
15438             as_FloatRegister($src$$reg));
15439   %}
15440   ins_pipe(pipe_class_default);
15441 %}
15442 
15443 instruct vneg2D(vecX dst, vecX src)
15444 %{
15445   predicate(n->as_Vector()->length() == 2);
15446   match(Set dst (NegVD src));
15447   ins_cost(INSN_COST * 3);
15448   format %{ "fneg  $dst,$src\t# vector (2D)" %}
15449   ins_encode %{
15450     __ fneg(as_FloatRegister($dst$$reg), __ T2D,
15451             as_FloatRegister($src$$reg));
15452   %}
15453   ins_pipe(pipe_class_default);
15454 %}
15455 
15456 // --------------------------------- AND --------------------------------------
15457 
15458 instruct vand8B(vecD dst, vecD src1, vecD src2)
15459 %{
15460   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15461             n->as_Vector()->length_in_bytes() == 8);
15462   match(Set dst (AndV src1 src2));
15463   ins_cost(INSN_COST);
15464   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
15465   ins_encode %{
15466     __ andr(as_FloatRegister($dst$$reg), __ T8B,
15467             as_FloatRegister($src1$$reg),
15468             as_FloatRegister($src2$$reg));
15469   %}
15470   ins_pipe(pipe_class_default);
15471 %}
15472 
15473 instruct vand16B(vecX dst, vecX src1, vecX src2)
15474 %{
15475   predicate(n->as_Vector()->length_in_bytes() == 16);
15476   match(Set dst (AndV src1 src2));
15477   ins_cost(INSN_COST);
15478   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
15479   ins_encode %{
15480     __ andr(as_FloatRegister($dst$$reg), __ T16B,
15481             as_FloatRegister($src1$$reg),
15482             as_FloatRegister($src2$$reg));
15483   %}
15484   ins_pipe(pipe_class_default);
15485 %}
15486 
15487 // --------------------------------- OR ---------------------------------------
15488 
15489 instruct vor8B(vecD dst, vecD src1, vecD src2)
15490 %{
15491   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15492             n->as_Vector()->length_in_bytes() == 8);
15493   match(Set dst (OrV src1 src2));
15494   ins_cost(INSN_COST);
15495   format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
15496   ins_encode %{
15497     __ orr(as_FloatRegister($dst$$reg), __ T8B,
15498             as_FloatRegister($src1$$reg),
15499             as_FloatRegister($src2$$reg));
15500   %}
15501   ins_pipe(pipe_class_default);
15502 %}
15503 
15504 instruct vor16B(vecX dst, vecX src1, vecX src2)
15505 %{
15506   predicate(n->as_Vector()->length_in_bytes() == 16);
15507   match(Set dst (OrV src1 src2));
15508   ins_cost(INSN_COST);
15509   format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
15510   ins_encode %{
15511     __ orr(as_FloatRegister($dst$$reg), __ T16B,
15512             as_FloatRegister($src1$$reg),
15513             as_FloatRegister($src2$$reg));
15514   %}
15515   ins_pipe(pipe_class_default);
15516 %}
15517 
15518 // --------------------------------- XOR --------------------------------------
15519 
15520 instruct vxor8B(vecD dst, vecD src1, vecD src2)
15521 %{
15522   predicate(n->as_Vector()->length_in_bytes() == 4 ||
15523             n->as_Vector()->length_in_bytes() == 8);
15524   match(Set dst (XorV src1 src2));
15525   ins_cost(INSN_COST);
15526   format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
15527   ins_encode %{
15528     __ eor(as_FloatRegister($dst$$reg), __ T8B,
15529             as_FloatRegister($src1$$reg),
15530             as_FloatRegister($src2$$reg));
15531   %}
15532   ins_pipe(pipe_class_default);
15533 %}
15534 
15535 instruct vxor16B(vecX dst, vecX src1, vecX src2)
15536 %{
15537   predicate(n->as_Vector()->length_in_bytes() == 16);
15538   match(Set dst (XorV src1 src2));
15539   ins_cost(INSN_COST);
15540   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
15541   ins_encode %{
15542     __ eor(as_FloatRegister($dst$$reg), __ T16B,
15543             as_FloatRegister($src1$$reg),
15544             as_FloatRegister($src2$$reg));
15545   %}
15546   ins_pipe(pipe_class_default);
15547 %}
15548 
15549 // ------------------------------ Shift ---------------------------------------
15550 
15551 instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
15552   match(Set dst (LShiftCntV cnt));
15553   format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
15554   ins_encode %{
15555     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
15556   %}
15557   ins_pipe(pipe_class_default);
15558 %}
15559 
15560 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
15561 instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
15562   match(Set dst (RShiftCntV cnt));
15563   format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
15564   ins_encode %{
15565     __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
15566     __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
15567   %}
15568   ins_pipe(pipe_class_default);
15569 %}
15570 
// Variable shift of 8 (or 4) byte lanes. Matches both left and arithmetic
// right shift: the shift operand comes from vshiftcntL/vshiftcntR above,
// which pre-negates the count for right shifts, so SSHL covers both.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable left/arithmetic-right shift of 16 byte lanes (see vsll8B note).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 8 (or 4) byte lanes: USHL with the
// pre-negated count from vshiftcntR.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 16 byte lanes.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15626 
// Immediate left shift of 8 (or 4) byte lanes. The count is masked to 31
// (int shift semantics); a count >= lane width (8) must yield zero, which
// SHL's encoding cannot express, so that case is emitted as EOR dst,src,src.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate left shift of 16 byte lanes (see vsll8B_imm note).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 8 (or 4) byte lanes. Counts >= 8 clamp
// to 7 (fills every bit with the sign, matching Java >> semantics).
// NOTE(review): "sh = -sh & 7" passes the count in the negated form this
// assembler's sshr() expects for its immediate encoding — confirm against
// the assembler before changing.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 16 byte lanes (see vsra8B_imm note).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 8 (or 4) byte lanes. Counts >= 8 produce
// zero via EOR dst,src,src; otherwise USHR with the negated-form immediate
// (same encoding convention as vsra8B_imm above).
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 16 byte lanes (see vsrl8B_imm note).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15735 
// Variable shift of 4 (or 2) short (16-bit, H) lanes. Matches both left and
// arithmetic right shift; right shifts arrive with a pre-negated count from
// vshiftcntR, so SSHL covers both.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable left/arithmetic-right shift of 8 short lanes (T8H).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 4 (or 2) short lanes.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 8 short lanes.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15791 
// Immediate left shift of 4 (or 2) short lanes. Count is masked to 31;
// a count >= lane width (16) must yield zero, emitted as EOR dst,src,src.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate left shift of 8 short lanes (see vsll4S_imm note).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 4 (or 2) short lanes. Counts >= 16
// clamp to 15 (sign fill). NOTE(review): "sh = -sh & 15" passes the count
// in the negated form this assembler's sshr() expects — confirm before
// changing (same convention as the byte-lane rules above).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 8 short lanes (see vsra4S_imm note).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 4 (or 2) short lanes. Counts >= 16 zero
// the result via EOR; otherwise USHR with the negated-form immediate.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 8 short lanes (see vsrl4S_imm note).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15900 
// Variable shift of 2 int (32-bit, S) lanes. Matches both left and
// arithmetic right shift; right shifts arrive with a pre-negated count
// from vshiftcntR, so SSHL covers both.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable left/arithmetic-right shift of 4 int lanes (T4S).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 2 int lanes.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 4 int lanes.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15954 
// Immediate left shift of 2 int lanes. The & 31 mask matches Java int shift
// semantics exactly, so no out-of-range case is needed (unlike byte/short).
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate left shift of 4 int lanes (see vsll2I_imm note).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 2 int lanes. NOTE(review): the count
// is passed negated-then-masked — the form this assembler's sshr() expects
// for its immediate encoding; confirm before changing.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 4 int lanes (see vsra2I_imm note).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 2 int lanes (negated-form immediate).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 4 int lanes (negated-form immediate).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
16032 
// Variable shift of 2 long (64-bit, D) lanes. Matches both left and
// arithmetic right shift; right shifts arrive with a pre-negated count
// from vshiftcntR, so SSHL covers both.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Variable logical right shift of 2 long lanes.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate left shift of 2 long lanes. The & 63 mask matches Java long
// shift semantics exactly, so no out-of-range case is needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate arithmetic right shift of 2 long lanes. NOTE(review): count is
// passed negated-then-masked — the form this assembler's sshr() expects for
// its immediate encoding; confirm before changing.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate logical right shift of 2 long lanes (negated-form immediate).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
16098 
16099 //----------PEEPHOLE RULES-----------------------------------------------------
16100 // These must follow all instruction definitions as they use the names
16101 // defined in the instructions definitions.
16102 //
16103 // peepmatch ( root_instr_name [preceding_instruction]* );
16104 //
16105 // peepconstraint %{
16106 // (instruction_number.operand_name relational_op instruction_number.operand_name
16107 //  [, ...] );
16108 // // instruction numbers are zero-based using left to right order in peepmatch
16109 //
16110 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16111 // // provide an instruction_number.operand_name for each operand that appears
16112 // // in the replacement instruction's match rule
16113 //
16114 // ---------VM FLAGS---------------------------------------------------------
16115 //
16116 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16117 //
16118 // Each peephole rule is given an identifying number starting with zero and
16119 // increasing by one in the order seen by the parser.  An individual peephole
16120 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16121 // on the command-line.
16122 //
16123 // ---------CURRENT LIMITATIONS----------------------------------------------
16124 //
16125 // Only match adjacent instructions in same basic block
16126 // Only equality constraints
16127 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16128 // Only one replacement instruction
16129 //
16130 // ---------EXAMPLE----------------------------------------------------------
16131 //
16132 // // pertinent parts of existing instructions in architecture description
16133 // instruct movI(iRegINoSp dst, iRegI src)
16134 // %{
16135 //   match(Set dst (CopyI src));
16136 // %}
16137 //
16138 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16139 // %{
16140 //   match(Set dst (AddI dst src));
16141 //   effect(KILL cr);
16142 // %}
16143 //
16144 // // Change (inc mov) to lea
16145 // peephole %{
//   // increment preceded by register-register move
16147 //   peepmatch ( incI_iReg movI );
16148 //   // require that the destination register of the increment
16149 //   // match the destination register of the move
16150 //   peepconstraint ( 0.dst == 1.dst );
16151 //   // construct a replacement instruction that sets
16152 //   // the destination to ( move's source register + one )
16153 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16154 // %}
16155 //
16156 
16157 // Implementation no longer uses movX instructions since
16158 // machine-independent system no longer uses CopyX nodes.
16159 //
16160 // peephole
16161 // %{
16162 //   peepmatch (incI_iReg movI);
16163 //   peepconstraint (0.dst == 1.dst);
16164 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16165 // %}
16166 
16167 // peephole
16168 // %{
16169 //   peepmatch (decI_iReg movI);
16170 //   peepconstraint (0.dst == 1.dst);
16171 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16172 // %}
16173 
16174 // peephole
16175 // %{
16176 //   peepmatch (addI_iReg_imm movI);
16177 //   peepconstraint (0.dst == 1.dst);
16178 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16179 // %}
16180 
16181 // peephole
16182 // %{
16183 //   peepmatch (incL_iReg movL);
16184 //   peepconstraint (0.dst == 1.dst);
16185 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16186 // %}
16187 
16188 // peephole
16189 // %{
16190 //   peepmatch (decL_iReg movL);
16191 //   peepconstraint (0.dst == 1.dst);
16192 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16193 // %}
16194 
16195 // peephole
16196 // %{
16197 //   peepmatch (addL_iReg_imm movL);
16198 //   peepconstraint (0.dst == 1.dst);
16199 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16200 // %}
16201 
16202 // peephole
16203 // %{
16204 //   peepmatch (addP_iReg_imm movP);
16205 //   peepconstraint (0.dst == 1.dst);
16206 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16207 // %}
16208 
16209 // // Change load of spilled value to only a spill
16210 // instruct storeI(memory mem, iRegI src)
16211 // %{
16212 //   match(Set mem (StoreI mem src));
16213 // %}
16214 //
16215 // instruct loadI(iRegINoSp dst, memory mem)
16216 // %{
16217 //   match(Set dst (LoadI mem));
16218 // %}
16219 //
16220 
16221 //----------SMARTSPILL RULES---------------------------------------------------
16222 // These must follow all instruction definitions as they use the names
16223 // defined in the instructions definitions.
16224 
16225 // Local Variables:
16226 // mode: c++
16227 // End: