1 //
   2 // Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// as regards Java usage; we don't use any callee-save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// 64-bit integer registers, each defined as two 32-bit halves: the real
// low word (Rn) plus a virtual high word (Rn_H) that exists only for the
// register allocator (see note above). Save types are (register save
// type, C-convention save type): r0-r18 are SOC/SOC, r19-r26 are SOC for
// the register allocator but SOE under the C convention, and r27-r31 are
// non-allocatable specials (heapbase, thread, fp, lr, sp).
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r8 and r9 are deliberately absent: they are kept invisible to the
// allocator for use as scratch registers (see comment above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always saved on call (even
// though the platform ABI treats v8-v15 as callee-save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // FP/SIMD registers, each described by four 32-bit slices: the real
  // low word (Vn) plus virtual Vn_H, Vn_J and Vn_K slices so the
  // allocator can model 64-bit (double) and 128-bit (vector) uses.
  // All are SOC for both save-type columns (see note above regarding
  // v8-v15 and the platform ABI).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
// Condition flags: not directly encodable as an instruction operand
// (see note above), so this uses encoding 32 and VMRegImpl::Bad() as
// the concrete register.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Allocation order for the general registers: volatiles first, then the
// argument registers, then the non-volatiles, and finally the
// non-allocatable specials (per the priority comment above).
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Allocation order for the FP/SIMD registers: no-save v16-v31 first,
// then the argument registers v0-v7, then v8-v15 (callee-saved under
// the platform ABI) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
// Condition flags live in their own allocation chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
 432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register. Note that the specials
// heapbase (R27), thread (R28), fp (R29) and lr (R30) ARE included
// here; only R31 (sp) is left out.
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton classes pinning an operand to one specific integer
// register, for use where an instruction or calling sequence requires
// a fixed register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,                 // heapbase
    R28, R28_H,                 // thread
    R29, R29_H,                 // fp
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
 517 
// Class for all non-special integer registers.
// This is the variant that keeps R29 reserved; it is paired with
// no_special_reg32_with_fp below via the dynamic class no_special_reg32
// (NOTE(review): presumably selected when PreserveFramePointer is set --
// confirm reg_class_dynamic argument order).
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// Same as no_special_reg32_no_fp except that R29 (fp) is allocatable.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
// Dynamic class: chooses between the two 32-bit classes above based on
// the PreserveFramePointer flag (NOTE(review): the no_fp variant --
// which excludes R29 -- should apply when the flag is set; confirm
// reg_class_dynamic argument order).
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers.
// 64-bit counterpart of no_special_reg32_no_fp: R29 stays reserved.
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// Same as no_special_reg_no_fp except that R29 (fp) is allocatable.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
// Dynamic class: 64-bit counterpart of no_special_reg32, again keyed on
// PreserveFramePointer (NOTE(review): confirm reg_class_dynamic
// argument order matches the 32-bit case).
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register (rmethod = R12)
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register (R27)
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register (R28)
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register (R29)
reg_class fp_reg(
    R29, R29_H
);

// Class for link register (R30)
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register (R31)
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (including the specials and sp)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,                 // heapbase
    R28, R28_H,                 // thread
    R29, R29_H,                 // fp
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
 759 
// Class for all non_special pointer registers -- excludes heapbase,
// thread, fp, lr and sp.
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers -- each entry is the first 32-bit
// slice of a V register (see the FP/SIMD reg_defs above).
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers (each entry is the Vn, Vn_H pair).
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 128 bit vector registers -- each entry uses all four
// 32-bit slices (Vn, Vn_H, Vn_J, Vn_K) of a V register.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 903 
// Singleton classes pinning specific vector registers, used where a
// matching rule requires a particular register.
//
// NOTE(review): each class below is described as "128 bit" but lists
// only the Vn/Vn_H slots (as in double_reg, not vectorx_reg) — TODO
// confirm whether the Vn_J/Vn_K slots are also needed here.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 926 
 927 %}
 928 
 929 //----------DEFINITION BLOCK---------------------------------------------------
 930 // Define name --> value mappings to inform the ADLC of an integer valued name
 931 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 932 // Format:
 933 //        int_def  <name>         ( <int_value>, <expression>);
 934 // Generated Code in ad_<arch>.hpp
 935 //        #define  <name>   (<expression>)
 936 //        // value == <int_value>
 937 // Generated code in ad_<arch>.cpp adlc_verification()
 938 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 939 //
 940 
 941 // we follow the ppc-aix port in using a simple cost model which ranks
 942 // register operations as cheap, memory ops as more expensive and
 943 // branches as most expensive. the first two have a low as well as a
 944 // normal cost. huge cost appears to be a way of saying don't do
 945 // something
 946 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches are ranked as twice the cost of a register operation.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are costed like branches.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are markedly more expensive than plain
  // register or memory operations.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 954 
 955 
 956 //----------SOURCE BLOCK-------------------------------------------------------
 957 // This is a block of C++ code which provides values, functions, and
 958 // definitions necessary in the rest of the architecture description
 959 
 960 source_hpp %{
 961 
 962 #include "gc/shared/cardTableModRefBS.hpp"
 963 
// Interface expected by the generic (shared) compiler code; this
// platform does not use call trampoline stubs, so both queries
// report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 981 
// Interface expected by the generic (shared) compiler code for
// emitting and sizing the exception and deopt handler stubs.
class HandlerImpl {

 public:

  // emit the handler code into cbuf; definitions live elsewhere in
  // this file's source block
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler (a single far branch)
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): reserves 4 instruction slots — presumably 1 adr
    // + up to 3 for the far branch; confirm against emit_deopt_handler
    return 4 * NativeInstruction::instruction_size;
  }
};
 998 
  // graph traversal helpers -- locate the MemBarNode linked to a node
  // through intervening Control/Memory projections; the ProjNode*&
  // arguments return the projections that were traversed
  MemBarNode *has_parent_membar(const Node *n,
                                ProjNode *&ctl, ProjNode *&mem);
  MemBarNode *has_child_membar(const MemBarNode *n,
                               ProjNode *&ctl, ProjNode *&mem);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs
  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // Use barrier instructions for unsafe volatile gets rather than
  // trying to identify an exact signature for them
  const bool UseBarriersForUnsafeVolatileGet = false;
1017 %}
1018 
1019 source %{
1020 
1021   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1022   // use to implement volatile reads and writes. For a volatile read
1023   // we simply need
1024   //
1025   //   ldar<x>
1026   //
1027   // and for a volatile write we need
1028   //
1029   //   stlr<x>
1030   // 
1031   // Alternatively, we can implement them by pairing a normal
1032   // load/store with a memory barrier. For a volatile read we need
1033   // 
1034   //   ldr<x>
1035   //   dmb ishld
1036   //
1037   // for a volatile write
1038   //
1039   //   dmb ish
1040   //   str<x>
1041   //   dmb ish
1042   //
1043   // In order to generate the desired instruction sequence we need to
1044   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1046   // writes and ii) do not occur through any other translation or
  // graph transformation. We can then provide alternative adlc
1048   // matching rules which translate these node sequences to the
1049   // desired machine code sequences. Selection of the alternative
1050   // rules can be implemented by predicates which identify the
1051   // relevant node sequences.
1052   //
1053   // The ideal graph generator translates a volatile read to the node
1054   // sequence
1055   //
1056   //   LoadX[mo_acquire]
1057   //   MemBarAcquire
1058   //
1059   // As a special case when using the compressed oops optimization we
1060   // may also see this variant
1061   //
1062   //   LoadN[mo_acquire]
1063   //   DecodeN
1064   //   MemBarAcquire
1065   //
1066   // A volatile write is translated to the node sequence
1067   //
1068   //   MemBarRelease
1069   //   StoreX[mo_release]
1070   //   MemBarVolatile
1071   //
1072   // n.b. the above node patterns are generated with a strict
1073   // 'signature' configuration of input and output dependencies (see
1074   // the predicates below for exact details). The two signatures are
1075   // unique to translated volatile reads/stores -- they will not
1076   // appear as a result of any other bytecode translation or inlining
1077   // nor as a consequence of optimizing transforms.
1078   //
1079   // We also want to catch inlined unsafe volatile gets and puts and
1080   // be able to implement them using either ldar<x>/stlr<x> or some
1081   // combination of ldr<x>/stlr<x> and dmb instructions.
1082   //
1083   // Inlined unsafe volatiles puts manifest as a minor variant of the
1084   // normal volatile put node sequence containing an extra cpuorder
1085   // membar
1086   //
1087   //   MemBarRelease
1088   //   MemBarCPUOrder
1089   //   StoreX[mo_release]
1090   //   MemBarVolatile
1091   //
1092   // n.b. as an aside, the cpuorder membar is not itself subject to
1093   // matching and translation by adlc rules.  However, the rule
1094   // predicates need to detect its presence in order to correctly
1095   // select the desired adlc rules.
1096   //
1097   // Inlined unsafe volatiles gets manifest as a somewhat different
1098   // node sequence to a normal volatile get
1099   //
1100   //   MemBarCPUOrder
1101   //        ||       \\
1102   //   MemBarAcquire LoadX[mo_acquire]
1103   //        ||
1104   //   MemBarCPUOrder
1105   //
1106   // In this case the acquire membar does not directly depend on the
1107   // load. However, we can be sure that the load is generated from an
1108   // inlined unsafe volatile get if we see it dependent on this unique
1109   // sequence of membar nodes. Similarly, given an acquire membar we
1110   // can know that it was added because of an inlined unsafe volatile
1111   // get if it is fed and feeds a cpuorder membar and if its feed
1112   // membar also feeds an acquiring load.
1113   //
1114   // So, where we can identify these volatile read and write
1115   // signatures we can choose to plant either of the above two code
1116   // sequences. For a volatile read we can simply plant a normal
1117   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1118   // also choose to inhibit translation of the MemBarAcquire and
1119   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1120   //
1121   // When we recognise a volatile store signature we can choose to
1122   // plant at a dmb ish as a translation for the MemBarRelease, a
1123   // normal str<x> and then a dmb ish for the MemBarVolatile.
1124   // Alternatively, we can inhibit translation of the MemBarRelease
1125   // and MemBarVolatile and instead plant a simple stlr<x>
1126   // instruction.
1127   //
1128   // Of course, the above only applies when we see these signature
1129   // configurations. We still want to plant dmb instructions in any
1130   // other cases where we may see a MemBarAcquire, MemBarRelease or
1131   // MemBarVolatile. For example, at the end of a constructor which
1132   // writes final/volatile fields we will see a MemBarRelease
1133   // instruction and this needs a 'dmb ish' lest we risk the
1134   // constructed object being visible without making the
1135   // final/volatile field writes visible.
1136   //
1137   // n.b. the translation rules below which rely on detection of the
1138   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1139   // If we see anything other than the signature configurations we
  // always just translate the loads and stores to ldr<x> and str<x>
1141   // and translate acquire, release and volatile membars to the
1142   // relevant dmb instructions.
1143   //
1144   // n.b.b as a case in point for the above comment, the current
1145   // predicates don't detect the precise signature for certain types
1146   // of volatile object stores (where the heap_base input type is not
1147   // known at compile-time to be non-NULL). In those cases the
1148   // MemBarRelease and MemBarVolatile bracket an if-then-else sequence
1149   // with a store in each branch (we need a different store depending
1150   // on whether heap_base is actually NULL). In such a case we will
1151   // just plant a dmb both before and after the branch/merge. The
1152   // predicate could (and probably should) be fixed later to also
1153   // detect this case.
1154 
1155   // graph traversal helpers
1156 
1157   // if node n is linked to a parent MemBarNode by an intervening
1158   // Control or Memory ProjNode return the MemBarNode otherwise return
1159   // NULL.
1160   //
1161   // n may only be a Load or a MemBar.
1162   //
1163   // The ProjNode* references c and m are used to return the relevant
1164   // nodes.
1165 
1166   MemBarNode *has_parent_membar(const Node *n, ProjNode *&c, ProjNode *&m)
1167   {
1168     Node *ctl = NULL;
1169     Node *mem = NULL;
1170     Node *membar = NULL;
1171 
1172     if (n->is_Load()) {
1173       ctl = n->lookup(LoadNode::Control);
1174       mem = n->lookup(LoadNode::Memory);
1175     } else if (n->is_MemBar()) {
1176       ctl = n->lookup(TypeFunc::Control);
1177       mem = n->lookup(TypeFunc::Memory);
1178     } else {
1179         return NULL;
1180     }
1181 
1182     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj())
1183       return NULL;
1184 
1185     c = ctl->as_Proj();
1186 
1187     membar = ctl->lookup(0);
1188 
1189     if (!membar || !membar->is_MemBar())
1190       return NULL;
1191 
1192     m = mem->as_Proj();
1193 
1194     if (mem->lookup(0) != membar)
1195       return NULL;
1196 
1197     return membar->as_MemBar();
1198   }
1199 
1200   // if n is linked to a child MemBarNode by intervening Control and
1201   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1202   //
  // The ProjNode*& arguments c and m are used to return pointers to
  // the relevant nodes.
1206 
1207   MemBarNode *has_child_membar(const MemBarNode *n, ProjNode *&c, ProjNode *&m)
1208   {
1209     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1210     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1211 
1212     // MemBar needs to have both a Ctl and Mem projection
1213     if (! ctl || ! mem)
1214       return NULL;
1215 
1216     c = ctl;
1217     m = mem;
1218 
1219     MemBarNode *child = NULL;
1220     Node *x;
1221 
1222     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1223       x = ctl->fast_out(i);
1224       // if we see a membar we keep hold of it. we may also see a new
1225       // arena copy of the original but it will appear later
1226       if (x->is_MemBar()) {
1227           child = x->as_MemBar();
1228           break;
1229       }
1230     }
1231 
1232     if (child == NULL)
1233       return NULL;
1234 
1235     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1236       x = mem->fast_out(i);
1237       // if we see a membar we keep hold of it. we may also see a new
1238       // arena copy of the original but it will appear later
1239       if (x == child) {
1240         return child;
1241       }
1242     }
1243     return NULL;
1244   }
1245 
1246   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1247 
// predicate controlling translation of a MemBarAcquire: returns true
// iff the membar belongs to one of the recognised volatile get
// signatures, in which case the matching load is planted as an
// ldar<x> and this dmb can be elided
bool unnecessary_acquire(const Node *barrier) {
  // assert barrier->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr())
      x = x->in(1);

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // only continue if we want to try to match unsafe volatile gets
  if (UseBarriersForUnsafeVolatileGet)
    return false;

  // need to check for
  //
  //     MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = has_parent_membar(barrier, ctl, mem);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (! ld || ! ld->is_acquire())
    return false;
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    // if we see the same load we drop it and stop searching
    if (x == ld) {
      ld = NULL;
      break;
    }
  }
  // we must have dropped the load (i.e. it consumes the Mem
  // projection as well as the Ctl projection)
  if (ld)
    return false;
  // check for a child cpuorder membar
  MemBarNode *child  = has_child_membar(barrier->as_MemBar(), ctl, mem);
  if (!child || child->Opcode() != Op_MemBarCPUOrder)
    return false;

  return true;
}
1342 
1343 bool needs_acquiring_load(const Node *n)
1344 {
1345   // assert n->is_Load();
1346   if (UseBarriersForVolatile)
1347     // we use a normal load and a dmb
1348     return false;
1349 
1350   LoadNode *ld = n->as_Load();
1351 
1352   if (!ld->is_acquire())
1353     return false;
1354 
1355   // check if this load is feeding an acquire membar
1356   //
1357   //   LoadX[mo_acquire]
1358   //   {  |1   }
1359   //   {DecodeN}
1360   //      |Parms
1361   //   MemBarAcquire*
1362   //
1363   // where * tags node we were passed
1364   // and |k means input k
1365 
1366   Node *start = ld;
1367   Node *mbacq = NULL;
1368 
1369   // if we hit a DecodeNarrowPtr we reset the start node and restart
1370   // the search through the outputs
1371  restart:
1372 
1373   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
1374     Node *x = start->fast_out(i);
1375     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
1376       mbacq = x;
1377     } else if (!mbacq &&
1378                (x->is_DecodeNarrowPtr() ||
1379                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
1380       start = x;
1381       goto restart;
1382     }
1383   }
1384 
1385   if (mbacq) {
1386     return true;
1387   }
1388 
1389   // only continue if we want to try to match unsafe volatile gets
1390   if (UseBarriersForUnsafeVolatileGet)
1391     return false;
1392 
1393   // check if Ctl and Proj feed comes from a MemBarCPUOrder
1394   //
1395   //     MemBarCPUOrder
1396   //        ||       \\
1397   //   MemBarAcquire* LoadX[mo_acquire]
1398   //        ||
1399   //   MemBarCPUOrder
1400 
1401   MemBarNode *membar;
1402   ProjNode *ctl;
1403   ProjNode *mem;
1404 
1405   membar = has_parent_membar(ld, ctl, mem);
1406 
1407   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
1408     return false;
1409 
1410   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
1411 
1412   membar = has_child_membar(membar, ctl, mem);
1413 
1414   if (!membar || !membar->Opcode() == Op_MemBarAcquire)
1415     return false;
1416 
1417   membar = has_child_membar(membar, ctl, mem);
1418   
1419   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
1420     return false;
1421 
1422   return true;
1423 }
1424 
// predicate controlling translation of a MemBarRelease: returns true
// iff the membar brackets one of the recognised volatile store
// signatures, in which case the store is planted as an stlr<x> and
// this dmb can be elided
bool unnecessary_release(const Node *n) {
  // assert n->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // ok, so we can omit this release barrier if it has been inserted
  // as part of a volatile store sequence
  //
  //   MemBarRelease
  //  {      ||      }
  //  {MemBarCPUOrder} -- optional
  //         ||     \\
  //         ||     StoreX[mo_release]
  //         | \     /
  //         | MergeMem
  //         | /
  //   MemBarVolatile
  //
  // where
  //  || and \\ represent Ctl and Mem feeds via Proj nodes
  //  | \ and / indicate further routing of the Ctl and Mem feeds
  //
  // so we need to check that
  //
  // i) the release membar (or its dependent cpuorder membar) feeds
  // control to a store node (via a Control project node)
  //
  // ii) the store is ordered release
  //
  // iii) the release membar (or its dependent cpuorder membar) feeds
  // control to a volatile membar (via the same Control project node)
  //
  // iv) the release membar feeds memory to a merge mem and to the
  // same store (both via a single Memory proj node)
  //
  // v) the store outputs to the merge mem
  //
  // vi) the merge mem outputs to the same volatile membar
  //
  // n.b. if this is an inlined unsafe node then the release membar
  // may feed its control and memory links via an intervening cpuorder
  // membar. this case can be dealt with when we check the release
  // membar projections. if they both feed a single cpuorder membar
  // node continue to make the same checks as above but with the
  // cpuorder membar substituted for the release membar. if they don't
  // both feed a cpuorder membar then the check fails.
  //
  // n.b.b. for an inlined unsafe store of an object in the case where
  // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
  // an embedded if then else where we expect the store. this is
  // needed to do the right type of store depending on whether
  // heap_base is NULL. We could check for that but for now we can
  // just take the hit of inserting a redundant dmb for this
  // redundant volatile membar

  MemBarNode *barrier = n->as_MemBar();
  ProjNode *ctl;
  ProjNode *mem;
  // check for an intervening cpuorder membar
  MemBarNode *b = has_child_membar(barrier, ctl, mem);
  if (b && b->Opcode() == Op_MemBarCPUOrder) {
    // ok, so start from the dependent cpuorder barrier
    barrier = b;
  }
  // check the ctl and mem flow
  ctl = barrier->proj_out(TypeFunc::Control);
  mem = barrier->proj_out(TypeFunc::Memory);

  // the barrier needs to have both a Ctl and Mem projection
  if (! ctl || ! mem)
    return false;

  Node *x = NULL;
  Node *mbvol = NULL;
  StoreNode * st = NULL;

  // For a normal volatile write the Ctl ProjNode should have output
  // to a MemBarVolatile and a Store marked as releasing
  //
  // n.b. for an inlined unsafe store of an object in the case where
  // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
  // an embedded if then else where we expect the store. this is
  // needed to do the right type of store depending on whether
  // heap_base is NULL. We could check for that case too but for now
  // we can just take the hit of inserting a dmb and a non-volatile
  // store to implement the volatile store

  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
      // at most one volatile membar may hang off the Ctl projection
      if (mbvol) {
        return false;
      }
      mbvol = x;
    } else if (x->is_Store()) {
      st = x->as_Store();
      if (! st->is_release()) {
        return false;
      }
    } else if (!x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  if (!mbvol || !st)
    return false;

  // the Mem ProjNode should output to a MergeMem and the same Store
  Node *mm = NULL;
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    if (!mm && x->is_MergeMem()) {
      mm = x;
    } else if (x != st && !x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  if (!mm)
    return false;

  // the MergeMem should output to the MemBarVolatile
  for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
    x = mm->fast_out(i);
    if (x != mbvol && !x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  return true;
}
1560 
1561 bool unnecessary_volatile(const Node *n) {
1562   // assert n->is_MemBar();
1563   if (UseBarriersForVolatile)
1564     // we need to plant a dmb
1565     return false;
1566 
1567   // ok, so we can omit this volatile barrier if it has been inserted
1568   // as part of a volatile store sequence
1569   //
1570   //   MemBarRelease
1571   //  {      ||      }
1572   //  {MemBarCPUOrder} -- optional
1573   //         ||     \\
1574   //         ||     StoreX[mo_release]
1575   //         | \     /
1576   //         | MergeMem
1577   //         | /
1578   //   MemBarVolatile
1579   //
1580   // where
1581   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1582   //  | \ and / indicate further routing of the Ctl and Mem feeds
1583   // 
1584   // we need to check that
1585   //
1586   // i) the volatile membar gets its control feed from a release
1587   // membar (or its dependent cpuorder membar) via a Control project
1588   // node
1589   //
1590   // ii) the release membar (or its dependent cpuorder membar) also
1591   // feeds control to a store node via the same proj node
1592   //
1593   // iii) the store is ordered release
1594   //
1595   // iv) the release membar (or its dependent cpuorder membar) feeds
1596   // memory to a merge mem and to the same store (both via a single
1597   // Memory proj node)
1598   //
1599   // v) the store outputs to the merge mem
1600   //
1601   // vi) the merge mem outputs to the volatile membar
1602   //
1603   // n.b. for an inlined unsafe store of an object in the case where
1604   // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
1605   // an embedded if then else where we expect the store. this is
1606   // needed to do the right type of store depending on whether
1607   // heap_base is NULL. We could check for that but for now we can
1608   // just take the hit of on inserting a redundant dmb for this
1609   // redundant volatile membar
1610 
1611   MemBarNode *mbvol = n->as_MemBar();
1612   Node *x = n->lookup(TypeFunc::Control);
1613 
1614   if (! x || !x->is_Proj())
1615     return false;
1616 
1617   ProjNode *proj = x->as_Proj();
1618 
1619   x = proj->lookup(0);
1620 
1621   if (!x || !x->is_MemBar())
1622     return false;
1623 
1624   MemBarNode *barrier = x->as_MemBar();
1625 
1626   // if the barrier is a release membar we have what we want. if it is
1627   // a cpuorder membar then we need to ensure that it is fed by a
1628   // release membar in which case we proceed to check the graph below
1629   // this cpuorder membar as the feed
1630 
1631   if (x->Opcode() != Op_MemBarRelease) {
1632     if (x->Opcode() != Op_MemBarCPUOrder)
1633       return false;
1634     ProjNode *ctl;
1635     ProjNode *mem;
1636     MemBarNode *b = has_parent_membar(x, ctl, mem);
1637     if (!b || !b->Opcode() == Op_MemBarRelease)
1638       return false;
1639   }
1640 
1641   ProjNode *ctl = barrier->proj_out(TypeFunc::Control);
1642   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1643 
1644   // barrier needs to have both a Ctl and Mem projection
1645   // and we need to have reached it via the Ctl projection
1646   if (! ctl || ! mem || ctl != proj)
1647     return false;
1648 
1649   StoreNode * st = NULL;
1650 
1651   // The Ctl ProjNode should have output to a MemBarVolatile and
1652   // a Store marked as releasing
1653   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1654     x = ctl->fast_out(i);
1655     if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1656       if (x != mbvol) {
1657         return false;
1658       }
1659     } else if (x->is_Store()) {
1660       st = x->as_Store();
1661       if (! st->is_release()) {
1662         return false;
1663       }
1664     } else if (!x->is_Mach()){
1665       // we may see mach nodes added during matching but nothing else
1666       return false;
1667     }
1668   }
1669 
1670   if (!st)
1671     return false;
1672 
1673   // the Mem ProjNode should output to a MergeMem and the same Store
1674   Node *mm = NULL;
1675   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1676     x = mem->fast_out(i);
1677     if (!mm && x->is_MergeMem()) {
1678       mm = x;
1679     } else if (x != st && !x->is_Mach()) {
1680       // we may see mach nodes added during matching but nothing else
1681       return false;
1682     }
1683   }
1684 
1685   if (!mm)
1686     return false;
1687 
1688   // the MergeMem should output to the MemBarVolatile
1689   for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1690     x = mm->fast_out(i);
1691     if (x != mbvol && !x->is_Mach()) {
1692       // we may see mach nodes added during matching but nothing else
1693       return false;
1694     }
1695   }
1696 
1697   return true;
1698 }
1699 
1700 
1701 
1702 bool needs_releasing_store(const Node *n)
1703 {
1704   // assert n->is_Store();
1705   if (UseBarriersForVolatile)
1706     // we use a normal store and dmb combination
1707     return false;
1708 
1709   StoreNode *st = n->as_Store();
1710 
1711   if (!st->is_release())
1712     return false;
1713 
1714   // check if this store is bracketed by a release (or its dependent
1715   // cpuorder membar) and a volatile membar
1716   //
1717   //   MemBarRelease
1718   //  {      ||      }
1719   //  {MemBarCPUOrder} -- optional
1720   //         ||     \\
1721   //         ||     StoreX[mo_release]
1722   //         | \     /
1723   //         | MergeMem
1724   //         | /
1725   //   MemBarVolatile
1726   //
1727   // where
1728   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1729   //  | \ and / indicate further routing of the Ctl and Mem feeds
1730   // 
1731 
1732 
1733   Node *x = st->lookup(TypeFunc::Control);
1734 
1735   if (! x || !x->is_Proj())
1736     return false;
1737 
1738   ProjNode *proj = x->as_Proj();
1739 
1740   x = proj->lookup(0);
1741 
1742   if (!x || !x->is_MemBar())
1743     return false;
1744 
1745   MemBarNode *barrier = x->as_MemBar();
1746 
1747   // if the barrier is a release membar we have what we want. if it is
1748   // a cpuorder membar then we need to ensure that it is fed by a
1749   // release membar in which case we proceed to check the graph below
1750   // this cpuorder membar as the feed
1751 
1752   if (x->Opcode() != Op_MemBarRelease) {
1753     if (x->Opcode() != Op_MemBarCPUOrder)
1754       return false;
1755     Node *ctl = x->lookup(TypeFunc::Control);
1756     Node *mem = x->lookup(TypeFunc::Memory);
1757     if (!ctl || !ctl->is_Proj() || !mem || !mem->is_Proj())
1758       return false;
1759     x = ctl->lookup(0);
1760     if (!x || !x->is_MemBar() || !x->Opcode() == Op_MemBarRelease)
1761       return false;
1762     Node *y = mem->lookup(0);
1763     if (!y || y != x)
1764       return false;
1765   }
1766 
1767   ProjNode *ctl = barrier->proj_out(TypeFunc::Control);
1768   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1769 
1770   // MemBarRelease needs to have both a Ctl and Mem projection
1771   // and we need to have reached it via the Ctl projection
1772   if (! ctl || ! mem || ctl != proj)
1773     return false;
1774 
1775   MemBarNode *mbvol = NULL;
1776 
1777   // The Ctl ProjNode should have output to a MemBarVolatile and
1778   // a Store marked as releasing
1779   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1780     x = ctl->fast_out(i);
1781     if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1782       mbvol = x->as_MemBar();
1783     } else if (x->is_Store()) {
1784       if (x != st) {
1785         return false;
1786       }
1787     } else if (!x->is_Mach()){
1788       return false;
1789     }
1790   }
1791 
1792   if (!mbvol)
1793     return false;
1794 
1795   // the Mem ProjNode should output to a MergeMem and the same Store
1796   Node *mm = NULL;
1797   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1798     x = mem->fast_out(i);
1799     if (!mm && x->is_MergeMem()) {
1800       mm = x;
1801     } else if (x != st && !x->is_Mach()) {
1802       return false;
1803     }
1804   }
1805 
1806   if (!mm)
1807     return false;
1808 
1809   // the MergeMem should output to the MemBarVolatile
1810   for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1811     x = mm->fast_out(i);
1812     if (x != mbvol && !x->is_Mach()) {
1813       return false;
1814     }
1815   }
1816 
1817   return true;
1818 }
1819 
1820 
1821 
1822 #define __ _masm.
1823 
1824 // advance declarations for helper functions to convert register
1825 // indices to register objects
1826 
1827 // the ad file has to provide implementations of certain methods
1828 // expected by the generic code
1829 //
1830 // REQUIRED FUNCTIONALITY
1831 
1832 //=============================================================================
1833 
1834 // !!!!! Special hack to get all types of calls to specify the byte offset
1835 //       from the start of the call to the point where the return address
1836 //       will point.
1837 
1838 int MachCallStaticJavaNode::ret_addr_offset()
1839 {
1840   // call should be a simple bl
1841   int off = 4;
1842   return off;
1843 }
1844 
1845 int MachCallDynamicJavaNode::ret_addr_offset()
1846 {
1847   return 16; // movz, movk, movk, bl
1848 }
1849 
1850 int MachCallRuntimeNode::ret_addr_offset() {
1851   // for generated stubs the call will be
1852   //   far_call(addr)
1853   // for real runtime callouts it will be six instructions
1854   // see aarch64_enc_java_to_runtime
1855   //   adr(rscratch2, retaddr)
1856   //   lea(rscratch1, RuntimeAddress(addr)
1857   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1858   //   blrt rscratch1
1859   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1860   if (cb) {
1861     return MacroAssembler::far_branch_size();
1862   } else {
1863     return 6 * NativeInstruction::instruction_size;
1864   }
1865 }
1866 
1867 // Indicate if the safepoint node needs the polling page as an input
1868 
1869 // the shared code plants the oop data at the start of the generated
1870 // code for the safepoint node and that needs ot be at the load
1871 // instruction itself. so we cannot plant a mov of the safepoint poll
1872 // address followed by a load. setting this to true means the mov is
1873 // scheduled as a prior instruction. that's better for scheduling
1874 // anyway.
1875 
bool SafePointNode::needs_polling_address_input()
{
  // true: the poll page address is an explicit input, so the mov that
  // materializes it can be scheduled ahead of the poll load and the
  // oop data can sit at the load instruction itself
  return true;
}
1880 
1881 //=============================================================================
1882 
#ifndef PRODUCT
// debug listing for the breakpoint pseudo-instruction
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  // brk #0 traps to the debugger
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // delegate to the generic size computation
  return MachNode::size(ra_);
}
1897 
1898 //=============================================================================
1899 
#ifndef PRODUCT
  // debug listing for nop padding
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // fixed size: one 4 byte instruction per nop
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1916 
1917 //=============================================================================
// MachConstantBaseNode produces no code on aarch64: constants are
// addressed absolutely (see calculate_table_base_offset below), so no
// base register needs to be set up.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called, since requires_postalloc_expand() is false
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // empty encoding => zero size
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1942 
#ifndef PRODUCT
// debug listing of the prolog; mirrors the two frame-building shapes
// emitted by MachPrologNode::emit (small frames: immediate sub plus
// stp at the frame top; large frames: push rfp/lr first, then drop sp
// via rscratch1)
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  // frame size in bytes, including space for the saved rfp/lr pair
  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1964 
// Emit the method prolog: patchable nop, optional stack bang, frame
// build, and (for the simulator build) a method-entry notification.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record where the fully built frame ends, for the oop-map machinery
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  // no relocatable values in the prolog
  return 0;
}
2011 
2012 //=============================================================================
2013 
#ifndef PRODUCT
// debug listing of the epilog; three frame sizes are distinguished:
// empty (pop only), small (immediate add) and large (via rscratch1)
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  // frame size in bytes, including the saved rfp/lr pair
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
2039 
// Emit the method epilog: tear down the frame and, for method
// compilations, touch the safepoint polling page on return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  // frame size in bytes, including the saved rfp/lr pair
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    // read the polling page so the VM can stop this thread at a
    // safepoint on method return
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
2077 
2078 //=============================================================================
2079 
2080 // Figure out which register class each belongs in: rc_int, rc_float or
2081 // rc_stack.
2082 enum RC { rc_bad, rc_int, rc_float, rc_stack };
2083 
2084 static enum RC rc_class(OptoReg::Name reg) {
2085 
2086   if (reg == OptoReg::Bad) {
2087     return rc_bad;
2088   }
2089 
2090   // we have 30 int registers * 2 halves
2091   // (rscratch1 and rscratch2 are omitted)
2092 
2093   if (reg < 60) {
2094     return rc_int;
2095   }
2096 
2097   // we have 32 float register * 2 halves
2098   if (reg < 60 + 128) {
2099     return rc_float;
2100   }
2101 
2102   // Between float regs & stack is the flags regs.
2103   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
2104 
2105   return rc_stack;
2106 }
2107 
2108 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
2109   Compile* C = ra_->C;
2110 
2111   // Get registers to move.
2112   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
2113   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
2114   OptoReg::Name dst_hi = ra_->get_reg_second(this);
2115   OptoReg::Name dst_lo = ra_->get_reg_first(this);
2116 
2117   enum RC src_hi_rc = rc_class(src_hi);
2118   enum RC src_lo_rc = rc_class(src_lo);
2119   enum RC dst_hi_rc = rc_class(dst_hi);
2120   enum RC dst_lo_rc = rc_class(dst_lo);
2121 
2122   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
2123 
2124   if (src_hi != OptoReg::Bad) {
2125     assert((src_lo&1)==0 && src_lo+1==src_hi &&
2126            (dst_lo&1)==0 && dst_lo+1==dst_hi,
2127            "expected aligned-adjacent pairs");
2128   }
2129 
2130   if (src_lo == dst_lo && src_hi == dst_hi) {
2131     return 0;            // Self copy, no move.
2132   }
2133 
2134   if (bottom_type()->isa_vect() != NULL) {
2135     uint len = 4;
2136     if (cbuf) {
2137       MacroAssembler _masm(cbuf);
2138       uint ireg = ideal_reg();
2139       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
2140       assert(ireg == Op_VecX, "sanity");
2141       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
2142         // stack->stack
2143         int src_offset = ra_->reg2offset(src_lo);
2144         int dst_offset = ra_->reg2offset(dst_lo);
2145         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
2146         len = 8;
2147         if (src_offset < 512) {
2148           __ ldp(rscratch1, rscratch2, Address(sp, src_offset));
2149         } else {
2150           __ ldr(rscratch1, Address(sp, src_offset));
2151           __ ldr(rscratch2, Address(sp, src_offset+4));
2152           len += 4;
2153         }
2154         if (dst_offset < 512) {
2155           __ stp(rscratch1, rscratch2, Address(sp, dst_offset));
2156         } else {
2157           __ str(rscratch1, Address(sp, dst_offset));
2158           __ str(rscratch2, Address(sp, dst_offset+4));
2159           len += 4;
2160         }
2161       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
2162         __ orr(as_FloatRegister(Matcher::_regEncode[dst_lo]), __ T16B,
2163                as_FloatRegister(Matcher::_regEncode[src_lo]),
2164                as_FloatRegister(Matcher::_regEncode[src_lo]));
2165       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
2166         __ str(as_FloatRegister(Matcher::_regEncode[src_lo]), __ Q,
2167                Address(sp, ra_->reg2offset(dst_lo)));
2168       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
2169         __ ldr(as_FloatRegister(Matcher::_regEncode[dst_lo]), __ Q,
2170                Address(sp, ra_->reg2offset(src_lo)));
2171       } else {
2172         ShouldNotReachHere();
2173       }
2174     } else if (st) {
2175       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
2176         // stack->stack
2177         int src_offset = ra_->reg2offset(src_lo);
2178         int dst_offset = ra_->reg2offset(dst_lo);
2179         if (src_offset < 512) {
2180           st->print("ldp  rscratch1, rscratch2, [sp, #%d]", src_offset);
2181         } else {
2182           st->print("ldr  rscratch1, [sp, #%d]", src_offset);
2183           st->print("\nldr  rscratch2, [sp, #%d]", src_offset+4);
2184         }
2185         if (dst_offset < 512) {
2186           st->print("\nstp  rscratch1, rscratch2, [sp, #%d]", dst_offset);
2187         } else {
2188           st->print("\nstr  rscratch1, [sp, #%d]", dst_offset);
2189           st->print("\nstr  rscratch2, [sp, #%d]", dst_offset+4);
2190         }
2191         st->print("\t# vector spill, stack to stack");
2192       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
2193         st->print("mov  %s, %s\t# vector spill, reg to reg",
2194                    Matcher::regName[dst_lo], Matcher::regName[src_lo]);
2195       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
2196         st->print("str  %s, [sp, #%d]\t# vector spill, reg to stack",
2197                    Matcher::regName[src_lo], ra_->reg2offset(dst_lo));
2198       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
2199         st->print("ldr  %s, [sp, #%d]\t# vector spill, stack to reg",
2200                    Matcher::regName[dst_lo], ra_->reg2offset(src_lo));
2201       }
2202     }
2203     return len;
2204   }
2205 
2206   switch (src_lo_rc) {
2207   case rc_int:
2208     if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
2209       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2210           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2211           // 64 bit
2212         if (cbuf) {
2213           MacroAssembler _masm(cbuf);
2214           __ mov(as_Register(Matcher::_regEncode[dst_lo]),
2215                  as_Register(Matcher::_regEncode[src_lo]));
2216         } else if (st) {
2217           st->print("mov  %s, %s\t# shuffle",
2218                     Matcher::regName[dst_lo],
2219                     Matcher::regName[src_lo]);
2220         }
2221       } else {
2222         // 32 bit
2223         if (cbuf) {
2224           MacroAssembler _masm(cbuf);
2225           __ movw(as_Register(Matcher::_regEncode[dst_lo]),
2226                   as_Register(Matcher::_regEncode[src_lo]));
2227         } else if (st) {
2228           st->print("movw  %s, %s\t# shuffle",
2229                     Matcher::regName[dst_lo],
2230                     Matcher::regName[src_lo]);
2231         }
2232       }
2233     } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
2234       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2235           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2236           // 64 bit
2237         if (cbuf) {
2238           MacroAssembler _masm(cbuf);
2239           __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2240                    as_Register(Matcher::_regEncode[src_lo]));
2241         } else if (st) {
2242           st->print("fmovd  %s, %s\t# shuffle",
2243                     Matcher::regName[dst_lo],
2244                     Matcher::regName[src_lo]);
2245         }
2246       } else {
2247         // 32 bit
2248         if (cbuf) {
2249           MacroAssembler _masm(cbuf);
2250           __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2251                    as_Register(Matcher::_regEncode[src_lo]));
2252         } else if (st) {
2253           st->print("fmovs  %s, %s\t# shuffle",
2254                     Matcher::regName[dst_lo],
2255                     Matcher::regName[src_lo]);
2256         }
2257       }
2258     } else {                    // gpr --> stack spill
2259       assert(dst_lo_rc == rc_stack, "spill to bad register class");
2260       int dst_offset = ra_->reg2offset(dst_lo);
2261       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2262           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2263           // 64 bit
2264         if (cbuf) {
2265           MacroAssembler _masm(cbuf);
2266           __ str(as_Register(Matcher::_regEncode[src_lo]),
2267                  Address(sp, dst_offset));
2268         } else if (st) {
2269           st->print("str  %s, [sp, #%d]\t# spill",
2270                     Matcher::regName[src_lo],
2271                     dst_offset);
2272         }
2273       } else {
2274         // 32 bit
2275         if (cbuf) {
2276           MacroAssembler _masm(cbuf);
2277           __ strw(as_Register(Matcher::_regEncode[src_lo]),
2278                  Address(sp, dst_offset));
2279         } else if (st) {
2280           st->print("strw  %s, [sp, #%d]\t# spill",
2281                     Matcher::regName[src_lo],
2282                     dst_offset);
2283         }
2284       }
2285     }
2286     return 4;
2287   case rc_float:
2288     if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
2289       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2290           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2291           // 64 bit
2292         if (cbuf) {
2293           MacroAssembler _masm(cbuf);
2294           __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
2295                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2296         } else if (st) {
2297           st->print("fmovd  %s, %s\t# shuffle",
2298                     Matcher::regName[dst_lo],
2299                     Matcher::regName[src_lo]);
2300         }
2301       } else {
2302         // 32 bit
2303         if (cbuf) {
2304           MacroAssembler _masm(cbuf);
2305           __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
2306                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2307         } else if (st) {
2308           st->print("fmovs  %s, %s\t# shuffle",
2309                     Matcher::regName[dst_lo],
2310                     Matcher::regName[src_lo]);
2311         }
2312       }
2313     } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
2314       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2315           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2316           // 64 bit
2317         if (cbuf) {
2318           MacroAssembler _masm(cbuf);
2319           __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2320                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2321         } else if (st) {
2322           st->print("fmovd  %s, %s\t# shuffle",
2323                     Matcher::regName[dst_lo],
2324                     Matcher::regName[src_lo]);
2325         }
2326       } else {
2327         // 32 bit
2328         if (cbuf) {
2329           MacroAssembler _masm(cbuf);
2330           __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2331                    as_FloatRegister(Matcher::_regEncode[src_lo]));
2332         } else if (st) {
2333           st->print("fmovs  %s, %s\t# shuffle",
2334                     Matcher::regName[dst_lo],
2335                     Matcher::regName[src_lo]);
2336         }
2337       }
2338     } else {                    // fpr --> stack spill
2339       assert(dst_lo_rc == rc_stack, "spill to bad register class");
2340       int dst_offset = ra_->reg2offset(dst_lo);
2341       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2342           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2343           // 64 bit
2344         if (cbuf) {
2345           MacroAssembler _masm(cbuf);
2346           __ strd(as_FloatRegister(Matcher::_regEncode[src_lo]),
2347                  Address(sp, dst_offset));
2348         } else if (st) {
2349           st->print("strd  %s, [sp, #%d]\t# spill",
2350                     Matcher::regName[src_lo],
2351                     dst_offset);
2352         }
2353       } else {
2354         // 32 bit
2355         if (cbuf) {
2356           MacroAssembler _masm(cbuf);
2357           __ strs(as_FloatRegister(Matcher::_regEncode[src_lo]),
2358                  Address(sp, dst_offset));
2359         } else if (st) {
2360           st->print("strs  %s, [sp, #%d]\t# spill",
2361                     Matcher::regName[src_lo],
2362                     dst_offset);
2363         }
2364       }
2365     }
2366     return 4;
2367   case rc_stack:
2368     int src_offset = ra_->reg2offset(src_lo);
2369     if (dst_lo_rc == rc_int) {  // stack --> gpr load
2370       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2371           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2372           // 64 bit
2373         if (cbuf) {
2374           MacroAssembler _masm(cbuf);
2375           __ ldr(as_Register(Matcher::_regEncode[dst_lo]),
2376                  Address(sp, src_offset));
2377         } else if (st) {
2378           st->print("ldr  %s, [sp, %d]\t# restore",
2379                     Matcher::regName[dst_lo],
2380                     src_offset);
2381         }
2382       } else {
2383         // 32 bit
2384         if (cbuf) {
2385           MacroAssembler _masm(cbuf);
2386           __ ldrw(as_Register(Matcher::_regEncode[dst_lo]),
2387                   Address(sp, src_offset));
2388         } else if (st) {
2389           st->print("ldr  %s, [sp, %d]\t# restore",
2390                     Matcher::regName[dst_lo],
2391                    src_offset);
2392         }
2393       }
2394       return 4;
2395     } else if (dst_lo_rc == rc_float) { // stack --> fpr load
2396       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2397           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2398           // 64 bit
2399         if (cbuf) {
2400           MacroAssembler _masm(cbuf);
2401           __ ldrd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2402                  Address(sp, src_offset));
2403         } else if (st) {
2404           st->print("ldrd  %s, [sp, %d]\t# restore",
2405                     Matcher::regName[dst_lo],
2406                     src_offset);
2407         }
2408       } else {
2409         // 32 bit
2410         if (cbuf) {
2411           MacroAssembler _masm(cbuf);
2412           __ ldrs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2413                   Address(sp, src_offset));
2414         } else if (st) {
2415           st->print("ldrs  %s, [sp, %d]\t# restore",
2416                     Matcher::regName[dst_lo],
2417                    src_offset);
2418         }
2419       }
2420       return 4;
2421     } else {                    // stack --> stack copy
2422       assert(dst_lo_rc == rc_stack, "spill to bad register class");
2423       int dst_offset = ra_->reg2offset(dst_lo);
2424       if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
2425           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
2426           // 64 bit
2427         if (cbuf) {
2428           MacroAssembler _masm(cbuf);
2429           __ ldr(rscratch1, Address(sp, src_offset));
2430           __ str(rscratch1, Address(sp, dst_offset));
2431         } else if (st) {
2432           st->print("ldr  rscratch1, [sp, %d]\t# mem-mem spill",
2433                     src_offset);
2434           st->print("\n\t");
2435           st->print("str  rscratch1, [sp, %d]",
2436                     dst_offset);
2437         }
2438       } else {
2439         // 32 bit
2440         if (cbuf) {
2441           MacroAssembler _masm(cbuf);
2442           __ ldrw(rscratch1, Address(sp, src_offset));
2443           __ strw(rscratch1, Address(sp, dst_offset));
2444         } else if (st) {
2445           st->print("ldrw  rscratch1, [sp, %d]\t# mem-mem spill",
2446                     src_offset);
2447           st->print("\n\t");
2448           st->print("strw  rscratch1, [sp, %d]",
2449                     dst_offset);
2450         }
2451       }
2452       return 8;
2453     }
2454   }
2455 
2456   assert(false," bad rc_class for spill ");
2457   Unimplemented();
2458   return 0;
2459 
2460 }
2461 
#ifndef PRODUCT
// format/emit/size all delegate to the common implementation helper
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation(NULL, ra_, true, NULL);
}
2478 
2479 //=============================================================================
2480 
2481 #ifndef PRODUCT
2482 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2483   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2484   int reg = ra_->get_reg_first(this);
2485   st->print("add %s, rsp, #%d]\t# box lock",
2486             Matcher::regName[reg], offset);
2487 }
2488 #endif
2489 
// Materialize the stack address of the lock box in the allocated
// register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // offsets too large for an add immediate are not expected here
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
2507 
2508 //=============================================================================
2509 
2510 #ifndef PRODUCT
2511 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2512 {
2513   st->print_cr("# MachUEPNode");
2514   if (UseCompressedClassPointers) {
2515     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2516     if (Universe::narrow_klass_shift() != 0) {
2517       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
2518     }
2519   } else {
2520    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2521   }
2522   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
2523   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
2524 }
2525 #endif
2526 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // compare the receiver's klass (object in j_rarg0) against the
  // expected klass; fall through on match, jump to the ic miss stub
  // otherwise
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // variable length sequence; delegate to the generic size computation
  return MachNode::size(ra_);
}
2545 
2546 // REQUIRED EMIT CODE
2547 
2548 //=============================================================================
2549 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the stub could not be allocated.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}

// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the stub could not be allocated.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  // capture the current pc in lr, then jump to the unpack blob
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2586 
2587 // REQUIRED MATCHER CODE
2588 
2589 //=============================================================================
2590 
2591 const bool Matcher::match_rule_supported(int opcode) {
2592 
2593   // TODO
2594   // identify extra cases that we might want to provide match rules for
2595   // e.g. Op_StrEquals and other intrinsics
2596   if (!has_match_rule(opcode)) {
2597     return false;
2598   }
2599 
2600   return true;  // Per default match rules are supported.
2601 }
2602 
// never expected to be called on aarch64 (note the Unimplemented)
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}

// never expected to be called on aarch64 (note the Unimplemented)
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
{
  Unimplemented();
  return false;
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
2625 
2626 // Vector width in bytes.
2627 const int Matcher::vector_width_in_bytes(BasicType bt) {
2628   int size = MIN2(16,(int)MaxVectorSize);
2629   // Minimum 2 values in vector
2630   if (size < 2*type2aelembytes(bt)) size = 0;
2631   // But never < 4
2632   if (size < 4) size = 0;
2633   return size;
2634 }
2635 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
// Minimum vector size (in elements).  Currently pinned to the maximum:
// only one vector size, 128 bits, is supported (see comment below).
const int Matcher::min_vector_size(const BasicType bt) {
  //return (type2aelembytes(bt) == 1) ? 4 : 2;
  // For the moment, only support 1 vector size, 128 bits
  return max_vector_size(bt);
}
2645 
// Vector ideal reg.  All vectors use the 128-bit VecX ideal register;
// the len argument is ignored since only one size is supported.
const int Matcher::vector_ideal_reg(int len) {
  return Op_VecX;
}

// Ideal register for a vector shift count; only the lowest bits of
// the register are used.  (The original "xmm reg" wording was
// inherited from the x86 port.)
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
2655 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Are misaligned vector loads/stores supported?  (The x86 port allows
// them; disabled here pending the TODO below.)
const bool Matcher::misaligned_vectors_ok() {
  // TODO fixme
  // return !AlignVector; // can be changed by flag
  return false;
}
2667 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray (18 longs = 144 bytes).
const int Matcher::init_array_short_size = 18 * BytesPerLong;
2673 
// Use conditional move (CMOVL).  Extra cost of a long cmove relative
// to an int cmove; 0 means no penalty.
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

// Extra cost of a float cmove relative to an int cmove; 0 = none.
const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2684 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// false here: per the question above, the CPU ignores the upper bits.
const bool Matcher::need_masked_shift_count = false;
2696 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing only works when the narrow oop needs no shift,
  // i.e. it is a direct offset from the heap base.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2716 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// true here: no split/fixup is required on this port.
const bool Matcher::misaligned_doubles_ok = true;
2729 
// Platform fixup hook for implicit null checks.  Unimplemented here
// (the stale "No-op on amd64" comment came from the x86 port).
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
2734 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?  true: deopt must reconstruct floats from doubles.
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2748 
2749 // Return whether or not this register is ever used as an argument.
2750 // This function is used on startup to build the trampoline stubs in
2751 // generateOptoStub.  Registers not mentioned will be killed by the VM
2752 // call in the trampoline, and arguments in those registers not be
2753 // available to the callee.
2754 bool Matcher::can_be_java_arg(int reg)
2755 {
2756   return
2757     reg ==  R0_num || reg == R0_H_num ||
2758     reg ==  R1_num || reg == R1_H_num ||
2759     reg ==  R2_num || reg == R2_H_num ||
2760     reg ==  R3_num || reg == R3_H_num ||
2761     reg ==  R4_num || reg == R4_H_num ||
2762     reg ==  R5_num || reg == R5_H_num ||
2763     reg ==  R6_num || reg == R6_H_num ||
2764     reg ==  R7_num || reg == R7_H_num ||
2765     reg ==  V0_num || reg == V0_H_num ||
2766     reg ==  V1_num || reg == V1_H_num ||
2767     reg ==  V2_num || reg == V2_H_num ||
2768     reg ==  V3_num || reg == V3_H_num ||
2769     reg ==  V4_num || reg == V4_H_num ||
2770     reg ==  V5_num || reg == V5_H_num ||
2771     reg ==  V6_num || reg == V6_H_num ||
2772     reg ==  V7_num || reg == V7_H_num;
2773 }
2774 
// A register may hold a spilled argument exactly when it can carry a
// Java argument.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Should long division by the given constant use a hand-written asm
// stub rather than inline code?  Never on this port.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2783 
// Register for DIVI projection of divmodI.
// ShouldNotReachHere: combined div/mod projections are never
// requested on this port (same for the three masks below).
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP-save register for MethodHandle invokes: the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2810 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

// Counts tf's parameters into gpcnt/fpcnt and classifies its return
// type into rtype (one of MacroAssembler::ret_type_*).
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      // NOTE(review): there is no break here, so FP parameters fall
      // through and are ALSO counted in gps; since the second halves
      // of longs/doubles hit the default case as well, gps counts
      // every domain field.  Presumably this is a deliberate slot
      // count for the simulator's blrt -- confirm against
      // MacroAssembler::blrt before treating gpcnt as "GP regs only".
      fps++;
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  BasicType rt = tf->return_type();
  // Classify the return type.  The default (integral) case sits in
  // the middle, but every case breaks, so the ordering is harmless.
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2852 
// Emit a volatile load/store INSN of REG at [BASE].  Volatile
// accesses require a bare base register: the guarantees reject any
// operand with an index, displacement or scale.  The macro declares a
// MacroAssembler named _masm at the invocation's scope, which
// statements following the macro may rely on (several enc_classes
// below do).  NOTE(review): the SCRATCH parameter is currently unused.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types for the loadStore() helpers below:
// integer, FP-scalar and SIMD/vector load/store emitters.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2866 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // Emits (masm.*insn)(reg, <address>) for the memory operand
  // described by opcode/base/index/size/disp.  Clobbers rscratch1
  // when both a displacement and an index register are present.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      // Index came from an int converted to long: sign-extend it.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Base + displacement only.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        // Base + (extended) index.
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Base + displacement + index cannot be encoded directly:
        // fold base+disp into rscratch1 first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
2903 
2904   static void loadStore(MacroAssembler masm, mem_float_insn insn,
2905                          FloatRegister reg, int opcode,
2906                          Register base, int index, int size, int disp)
2907   {
2908     Address::extend scale;
2909 
2910     switch (opcode) {
2911     case INDINDEXSCALEDOFFSETI2L:
2912     case INDINDEXSCALEDI2L:
2913     case INDINDEXSCALEDOFFSETI2LN:
2914     case INDINDEXSCALEDI2LN:
2915       scale = Address::sxtw(size);
2916       break;
2917     default:
2918       scale = Address::lsl(size);
2919     }
2920 
2921      if (index == -1) {
2922       (masm.*insn)(reg, Address(base, disp));
2923     } else {
2924       if (disp == 0) {
2925         (masm.*insn)(reg, Address(base, as_Register(index), scale));
2926       } else {
2927         masm.lea(rscratch1, Address(base, disp));
2928         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
2929       }
2930     }
2931   }
2932 
2933   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2934                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2935                          int opcode, Register base, int index, int size, int disp)
2936   {
2937     if (index == -1) {
2938       (masm.*insn)(reg, T, Address(base, disp));
2939     } else {
2940       assert(disp == 0, "unsupported address mode");
2941       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2942     }
2943   }
2944 
2945 %}
2946 
2947 
2948 
2949 //----------ENCODING BLOCK-----------------------------------------------------
2950 // This block specifies the encoding classes used by the compiler to
2951 // output byte streams.  Encoding classes are parameterized macros
2952 // used by Machine Instruction Nodes in order to generate the bit
2953 // encoding of the instruction.  Operands specify their base encoding
2954 // interface with the interface keyword.  There are currently
2955 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2956 // COND_INTER.  REG_INTER causes an operand to generate a function
2957 // which returns its register number when queried.  CONST_INTER causes
2958 // an operand to generate a function which returns the value of the
2959 // constant when queried.  MEMORY_INTER causes an operand to generate
2960 // four functions which return the Base Register, the Index Register,
2961 // the Scale Value, and the Offset Value of the operand when queried.
2962 // COND_INTER causes an operand to generate six functions which return
2963 // the encoding code (ie - encoding bits for the instruction)
2964 // associated with each basic boolean condition for a conditional
2965 // instruction.
2966 //
2967 // Instructions specify two basic values for encoding.  Again, a
2968 // function is available to check if the constant displacement is an
2969 // oop. They use the ins_encode keyword to specify their encoding
2970 // classes (which must be a sequence of enc_class names, and their
2971 // parameters, specified in the encoding block), and they use the
2972 // opcode keyword to specify, in order, their primary, secondary, and
2973 // tertiary opcode.  Only the opcode sections which a particular
2974 // instruction needs for encoding need to be specified.
2975 encode %{
2976   // Build emit functions for each basic byte or larger field in the
2977   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2978   // from C++ code in the enc_class source block.  Emit functions will
2979   // live in the main source block for now.  In future, we can
2980   // generalize this by adding a syntax that specifies the sizes of
2981   // fields in an order, so that the adlc can build the emit functions
2982   // automagically
2983 
  // catch all for unimplemented encodings: any instruct rule that has
  // no real encoder yet emits __ unimplemented() instead.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2989 
  // BEGIN Non-volatile memory access
  //
  // Each encoding hands off to one of the loadStore() helpers above,
  // passing a freshly constructed MacroAssembler by value together
  // with a member-function pointer for the desired instruction.  The
  // $mem operand supplies opcode/base/index/scale/disp; rscratch1 may
  // be clobbered for base+disp+index address forms.

  // Integer loads (signed/unsigned byte, halfword, word, doubleword).

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Same enc_class name as above but an iRegL destination; adlc
  // distinguishes the two by signature (likewise for ldrh/ldrw below).
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // FP-scalar loads (single and double precision).

  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD/vector loads; S/D/Q select the SIMD register variant.

  enc_class aarch64_enc_ldrvS(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3093 
  // Integer stores.  The *0 variants store the zero register (zr)
  // directly, avoiding the need to materialize a zero.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // Copy sp into rscratch2 and store that instead.
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // FP-scalar stores.

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD/vector stores; S/D/Q select the SIMD register variant.

  enc_class aarch64_enc_strvS(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
3181 
  // volatile loads and stores
  //
  // All of these use the MOV_VOLATILE macro above, which only accepts
  // a bare-base memory operand (no index/scale/disp) and declares a
  // local _masm that statements following the macro rely on.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // Signed sub-word volatile loads: the explicit sxt* after the load
  // provides the sign extension the plain ldarb/ldarh do not.

  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Same name as above with an iRegL destination; adlc distinguishes
  // the two by signature.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
3262 
  // Volatile FP loads: perform the load-acquire into rscratch1, then
  // move the bits into the destination FP register.

  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile FP stores: copy the FP bits into rscratch2 first, then
  // store-release from there.  The inner braces scope this temporary
  // _masm so it does not clash with the one MOV_VOLATILE declares.

  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
3308 
  // synchronized read/update encodings

  // Exclusive load-acquire (ldaxr).  Unlike the volatile encodings,
  // full base/index/disp operands are accepted: any compound address
  // is first reduced into rscratch1 via lea (clobbering it).
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + scaled index: two lea steps into rscratch1.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
3339 
  // Exclusive store-release (stlxr).  rscratch2 holds any computed
  // address; rscratch1 receives the store-exclusive status word.  The
  // final cmpw leaves the condition flags EQ iff the store succeeded
  // (status == 0), for consumption by a following cset/branch.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + scaled index: two lea steps into rscratch2.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
3369 
  // 64-bit compare-and-swap: ldxr/stlxr retry loop.  rscratch2 may
  // hold the computed address; rscratch1 holds the loaded value and
  // then the store-exclusive status.  On exit the flags are EQ iff the
  // swap succeeded: the cmp against oldval sets them, and neither
  // stlxr nor cbnzw disturbs them on the success path.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // Reduce the memory operand to a single address register.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    // Retry until the exclusive store succeeds or the value differs.
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxr(rscratch1, addr_reg);
    __ cmp(rscratch1, old_reg);
    __ br(Assembler::NE, done);
    __ stlxr(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);
    __ bind(done);
  %}
3408 
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    // 32-bit variant of aarch64_enc_cmpxchg: compare-and-swap via an
    // ldxrw/stlxrw retry loop.  Leaves EQ in the flags on success and
    // NE on failure for aarch64_enc_cset_eq.
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    // Flatten the memory operand into a single address register, as
    // required by the exclusive-access instructions.
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        // No base+index+disp addressing mode: fold the displacement first.
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxrw(rscratch1, addr_reg);  // load-exclusive the current value
    __ cmpw(rscratch1, old_reg);    // mismatch leaves NE for the caller
    __ br(Assembler::NE, done);
    // stlxrw: 0 in rscratch1 on success, 1 => exclusive monitor lost, retry.
    __ stlxrw(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);
    // Success falls through with flags still EQ from the cmpw above.
    __ bind(done);
  %}
3447 
3448   // auxiliary used for CompareAndSwapX to set result register
3449   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
3450     MacroAssembler _masm(&cbuf);
3451     Register res_reg = as_Register($res$$reg);
3452     __ cset(res_reg, Assembler::EQ);
3453   %}
3454 
3455   // prefetch encodings
3456 
  enc_class aarch64_enc_prefetchw(memory mem) %{
    // Prefetch-for-store: emit PRFM PSTL1KEEP (store, L1, keep/temporal)
    // for the memory operand.
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
      // NOTE(review): the nop looks like size padding so this path matches
      // the two-instruction form below -- confirm before removing.
      __ nop();
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // No base+index+disp addressing mode: fold the displacement first.
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3476 
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    // Zero 'cnt' words starting at 'base' using an 8-way unrolled store
    // loop entered Duff's-device style to handle cnt % 8 without a
    // separate cleanup loop.  Both registers are clobbered.
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= unroll
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Each str below encodes to 4 bytes, so backing up (cnt % unroll)
    // instructions from 'entry' executes exactly the last (cnt % unroll)
    // stores on the first pass, zeroing the leading partial group.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    // Stores are relative to the already-advanced base_reg, i.e. they fill
    // the 'unroll' words immediately below it.
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
3525 
  /// mov encodings
3527 
3528   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
3529     MacroAssembler _masm(&cbuf);
3530     u_int32_t con = (u_int32_t)$src$$constant;
3531     Register dst_reg = as_Register($dst$$reg);
3532     if (con == 0) {
3533       __ movw(dst_reg, zr);
3534     } else {
3535       __ movw(dst_reg, con);
3536     }
3537   %}
3538 
3539   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
3540     MacroAssembler _masm(&cbuf);
3541     Register dst_reg = as_Register($dst$$reg);
3542     u_int64_t con = (u_int64_t)$src$$constant;
3543     if (con == 0) {
3544       __ mov(dst_reg, zr);
3545     } else {
3546       __ mov(dst_reg, con);
3547     }
3548   %}
3549 
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    // Load a pointer constant, choosing the materialization from the
    // constant's relocation kind (patchable oop, metadata, or raw bits).
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and 1 have their own encodings (aarch64_enc_mov_p0/_p1).
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // Small constants (below the first page) fit a plain mov.
          __ mov(dst_reg, con);
        } else {
          // Otherwise form the address page-relatively: adrp + add.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
3574 
3575   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
3576     MacroAssembler _masm(&cbuf);
3577     Register dst_reg = as_Register($dst$$reg);
3578     __ mov(dst_reg, zr);
3579   %}
3580 
3581   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
3582     MacroAssembler _masm(&cbuf);
3583     Register dst_reg = as_Register($dst$$reg);
3584     __ mov(dst_reg, (u_int64_t)1);
3585   %}
3586 
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    // Materialize the safepoint polling page address with a poll_type
    // relocation so the VM can recognize the polling site.
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    // The page address is expected to be page-aligned, so a single adrp
    // must reach it with no residual low-order offset.
    assert(off == 0, "assumed offset == 0");
  %}
3595 
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    // Materialize the card-table byte map base address (immByteMapBase)
    // via a single page-relative adrp.
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, ExternalAddress(page), off);
    // The byte map base is expected to be page-aligned; adrp alone
    // must reach it exactly.
    assert(off == 0, "assumed offset == 0");
  %}
3604 
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    // Load a narrow (compressed) oop constant.  set_narrow_oop emits the
    // value with its oop relocation; a narrow null has its own encoding
    // (aarch64_enc_mov_n0), hence the ShouldNotReachHere.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
3617 
3618   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
3619     MacroAssembler _masm(&cbuf);
3620     Register dst_reg = as_Register($dst$$reg);
3621     __ mov(dst_reg, zr);
3622   %}
3623 
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    // Load a narrow (compressed) klass constant.  set_narrow_klass emits
    // the value with its metadata relocation; NULL is not a valid
    // compressed klass constant here.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3636 
3637   // arithmetic encodings
3638 
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    // 32-bit add/subtract of an immediate.  One encoding serves both
    // AddI and SubI: the instruct's 'primary' opcode bit selects which,
    // by negating the constant for the subtract form.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    // The hardware immediate field is unsigned, so pick the instruction
    // that takes the magnitude.
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
3652 
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    // 64-bit add/subtract of an immediate; same 'primary'-selects-subtract
    // scheme as aarch64_enc_addsubw_imm.
    // NOTE(review): the constant is narrowed to int32_t -- presumably
    // immLAddSub guarantees it fits; confirm against the operand definition.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    // The hardware immediate field is unsigned, so pick the instruction
    // that takes the magnitude.
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3666 
3667   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3668     MacroAssembler _masm(&cbuf);
3669    Register dst_reg = as_Register($dst$$reg);
3670    Register src1_reg = as_Register($src1$$reg);
3671    Register src2_reg = as_Register($src2$$reg);
3672     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3673   %}
3674 
3675   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3676     MacroAssembler _masm(&cbuf);
3677    Register dst_reg = as_Register($dst$$reg);
3678    Register src1_reg = as_Register($src1$$reg);
3679    Register src2_reg = as_Register($src2$$reg);
3680     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3681   %}
3682 
3683   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3684     MacroAssembler _masm(&cbuf);
3685    Register dst_reg = as_Register($dst$$reg);
3686    Register src1_reg = as_Register($src1$$reg);
3687    Register src2_reg = as_Register($src2$$reg);
3688     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3689   %}
3690 
3691   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3692     MacroAssembler _masm(&cbuf);
3693    Register dst_reg = as_Register($dst$$reg);
3694    Register src1_reg = as_Register($src1$$reg);
3695    Register src2_reg = as_Register($src2$$reg);
3696     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3697   %}
3698 
3699   // compare instruction encodings
3700 
3701   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
3702     MacroAssembler _masm(&cbuf);
3703     Register reg1 = as_Register($src1$$reg);
3704     Register reg2 = as_Register($src2$$reg);
3705     __ cmpw(reg1, reg2);
3706   %}
3707 
3708   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
3709     MacroAssembler _masm(&cbuf);
3710     Register reg = as_Register($src1$$reg);
3711     int32_t val = $src2$$constant;
3712     if (val >= 0) {
3713       __ subsw(zr, reg, val);
3714     } else {
3715       __ addsw(zr, reg, -val);
3716     }
3717   %}
3718 
3719   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
3720     MacroAssembler _masm(&cbuf);
3721     Register reg1 = as_Register($src1$$reg);
3722     u_int32_t val = (u_int32_t)$src2$$constant;
3723     __ movw(rscratch1, val);
3724     __ cmpw(reg1, rscratch1);
3725   %}
3726 
3727   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3728     MacroAssembler _masm(&cbuf);
3729     Register reg1 = as_Register($src1$$reg);
3730     Register reg2 = as_Register($src2$$reg);
3731     __ cmp(reg1, reg2);
3732   %}
3733 
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    // 64-bit compare against an immL12 immediate, implemented as
    // subs/adds into the zero register so only the flags are written.
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      // Negative values are compared by adding the magnitude instead.
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      // (-val == val, so the magnitude trick fails; go through a register)
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
3748 
3749   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
3750     MacroAssembler _masm(&cbuf);
3751     Register reg1 = as_Register($src1$$reg);
3752     u_int64_t val = (u_int64_t)$src2$$constant;
3753     __ mov(rscratch1, val);
3754     __ cmp(reg1, rscratch1);
3755   %}
3756 
3757   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3758     MacroAssembler _masm(&cbuf);
3759     Register reg1 = as_Register($src1$$reg);
3760     Register reg2 = as_Register($src2$$reg);
3761     __ cmp(reg1, reg2);
3762   %}
3763 
3764   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3765     MacroAssembler _masm(&cbuf);
3766     Register reg1 = as_Register($src1$$reg);
3767     Register reg2 = as_Register($src2$$reg);
3768     __ cmpw(reg1, reg2);
3769   %}
3770 
3771   enc_class aarch64_enc_testp(iRegP src) %{
3772     MacroAssembler _masm(&cbuf);
3773     Register reg = as_Register($src$$reg);
3774     __ cmp(reg, zr);
3775   %}
3776 
3777   enc_class aarch64_enc_testn(iRegN src) %{
3778     MacroAssembler _masm(&cbuf);
3779     Register reg = as_Register($src$$reg);
3780     __ cmpw(reg, zr);
3781   %}
3782 
3783   enc_class aarch64_enc_b(label lbl) %{
3784     MacroAssembler _masm(&cbuf);
3785     Label *L = $lbl$$label;
3786     __ b(*L);
3787   %}
3788 
3789   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
3790     MacroAssembler _masm(&cbuf);
3791     Label *L = $lbl$$label;
3792     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3793   %}
3794 
3795   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
3796     MacroAssembler _masm(&cbuf);
3797     Label *L = $lbl$$label;
3798     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3799   %}
3800 
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     // Slow-path subtype check: walks the secondary-supers list via
     // check_klass_subtype_slow_path.  With $primary set, the result
     // register is zeroed on success (the 'miss' label is only reached
     // on failure, skipping the zeroing).
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3818 
  enc_class aarch64_enc_java_static_call(method meth) %{
    // Static Java call.  The relocation type distinguishes runtime stubs,
    // optimized virtual calls and true static calls; all go through a
    // trampoline so the target can be out of direct-branch range.
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }

    if (_method) {
      // Emit stub for static call
      CompiledStaticCall::emit_to_interp_stub(cbuf);
    }
  %}
3837 
3838   enc_class aarch64_enc_java_dynamic_call(method meth) %{
3839     MacroAssembler _masm(&cbuf);
3840     __ ic_call((address)$meth$$method);
3841   %}
3842 
3843   enc_class aarch64_enc_call_epilog() %{
3844     MacroAssembler _masm(&cbuf);
3845     if (VerifyStackAtCalls) {
3846       // Check that stack depth is unchanged: find majik cookie on stack
3847       __ call_Unimplemented();
3848     }
3849   %}
3850 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: a (trampolined) direct call works.
      __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
    } else {
      // Native runtime call: describe the C signature for blrt, and push
      // a (zr, return-address) pair so the stack walker can find the
      // last Java frame across the native call.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb again.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3877 
3878   enc_class aarch64_enc_rethrow() %{
3879     MacroAssembler _masm(&cbuf);
3880     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
3881   %}
3882 
  enc_class aarch64_enc_ret() %{
    // Method return: branch to the address in the link register.
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
3887 
3888   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3889     MacroAssembler _masm(&cbuf);
3890     Register target_reg = as_Register($jump_target$$reg);
3891     __ br(target_reg);
3892   %}
3893 
3894   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
3895     MacroAssembler _masm(&cbuf);
3896     Register target_reg = as_Register($jump_target$$reg);
3897     // exception oop should be in r0
3898     // ret addr has been popped into lr
3899     // callee expects it in r3
3900     __ mov(r3, lr);
3901     __ br(target_reg);
3902   %}
3903 
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor enter: biased locking, then a stack-lock CAS on
    // the object's mark word, then recursive-lock detection, then an
    // owner CAS on an inflated monitor.  Contract with the caller:
    // flags == EQ on success, NE means fall through to the slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so cmp(oop, zr) deliberately sets NE
      // to force the slow path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking) {
      __ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont);
    }

    // Handle existing monitor
    if (EmitSync & 0x02) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    // Note that this is simply a CAS: it does not generate any
    // barriers.  These are separately generated by
    // membar_acquire_lock().
    {
      Label retry_load;
      __ bind(retry_load);
      __ ldxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      // (stlxr writes 0 to tmp on success, so cbzw branches to cont
      // with the flags still EQ from the cmp above)
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    // (ands also sets EQ iff the mark points into our own stack page.)
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4048 
4049   // TODO
4050   // reimplement this with custom cmpxchgptr code
4051   // which avoids some of the unnecessary branching
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor exit: biased-lock revocation, recursive-unlock
    // check, stack-lock CAS to restore the displaced mark word, and
    // inflated-monitor release.  Contract with the caller: flags == EQ
    // on success, NE means fall through to the slow path.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        // (stlxr writes 0 to tmp on success, so cbzw branches to cont
        // with the flags still EQ from the cmp above)
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      // Not the uncontended owner: give up (NE already set for the caller).
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      // Waiters present: take the slow path (cmp above left NE).
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4141 
4142 %}
4143 
4144 //----------FRAME--------------------------------------------------------------
4145 // Definition of frame structure and management information.
4146 //
4147 //  S T A C K   L A Y O U T    Allocators stack-slot number
4148 //                             |   (to get allocators register number
4149 //  G  Owned by    |        |  v    add OptoReg::stack0())
4150 //  r   CALLER     |        |
4151 //  o     |        +--------+      pad to even-align allocators stack-slot
4152 //  w     V        |  pad0  |        numbers; owned by CALLER
4153 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4154 //  h     ^        |   in   |  5
4155 //        |        |  args  |  4   Holes in incoming args owned by SELF
4156 //  |     |        |        |  3
4157 //  |     |        +--------+
4158 //  V     |        | old out|      Empty on Intel, window on Sparc
4159 //        |    old |preserve|      Must be even aligned.
4160 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4161 //        |        |   in   |  3   area for Intel ret address
4162 //     Owned by    |preserve|      Empty on Sparc.
4163 //       SELF      +--------+
4164 //        |        |  pad2  |  2   pad to align old SP
4165 //        |        +--------+  1
4166 //        |        | locks  |  0
4167 //        |        +--------+----> OptoReg::stack0(), even aligned
4168 //        |        |  pad1  | 11   pad to align new SP
4169 //        |        +--------+
4170 //        |        |        | 10
4171 //        |        | spills |  9   spills
4172 //        V        |        |  8   (pad0 slot for callee)
4173 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4174 //        ^        |  out   |  7
4175 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4176 //     Owned by    +--------+
4177 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4178 //        |    new |preserve|      Must be even-aligned.
4179 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4180 //        |        |        |
4181 //
4182 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4183 //         known from SELF's arguments and the Java calling convention.
4184 //         Region 6-7 is determined per call site.
4185 // Note 2: If the calling convention leaves holes in the incoming argument
4186 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
4188 //         incoming area, as the Java calling convention is completely under
4189 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4191 //         varargs C calling conventions.
4192 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4193 //         even aligned with pad0 as needed.
4194 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4195 //           (the latter is true on Intel but is it false on AArch64?)
4196 //         region 6-11 is even aligned; it may be padded out more so that
4197 //         the region from SP to FP meets the minimum stack alignment.
4198 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4199 //         alignment.  Region 11, pad1, may be dynamically extended so that
4200 //         SP meets the minimum alignment.
4201 
// Frame layout and calling-convention contract between compiled code,
// the interpreter and the runtime.
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  // (two 32-bit slots, i.e. one 64-bit word, per monitor)
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return-value register, indexed by ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad for values with no high half.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
4305 
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute
                             // (default cost for operands that do not
                             // specify op_cost themselves)

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
4323 
4324 //----------OPERANDS-----------------------------------------------------------
4325 // Operand definitions must precede instruction definitions for correct parsing
4326 // in the ADLC because operands constitute user defined types which are used in
4327 // instruction definitions.
4328 
4329 //----------Simple Operands----------------------------------------------------
4330 
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no larger than 4
// NOTE(review): undocumented in the original -- presumably a small count
// required by specific match rules; confirm against the rules that use it.
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31 (maximum shift amount for 32-bit shifts)
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The exact-value operands below (8, 16, 24, 32, 48, 56, 64) are
// undocumented in the original; they look like bit offsets / shift
// amounts required by specific match rules -- confirm against uses.
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xFF)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4484 
// Constant 63 (maximum shift amount for 64-bit shifts).
// NOTE(review): despite the immL_ prefix this operand matches a ConI
// (32-bit int constant) via get_int() -- shift counts are ints in the
// ideal graph.  Confirm the naming/intent against the rules that use it.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant 255 (0xFF).
// NOTE(review): same quirk as immL_63 -- matches a ConI via get_int()
// despite the immL_ prefix.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4504 
// 64 bit constant 65535 (0xFFFF)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xFFFFFFFF)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long constants of the form 2^k - 1 (contiguous low-order bit masks)
// whose top two bits are clear.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int constants of the form 2^k - 1 (contiguous low-order bit masks)
// whose top two bits are clear.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4546 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long constant variant of immIU12)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores (long variant)
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4643 
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (byte offset of last_Java_pc within JavaThread, used when saving the
// current PC into the frame anchor)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4730 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): this comment mirrors immP_M1's exactly -- confirm the
// distinct role of the -2 sentinel against its uses.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4812 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value representable as a packed FP immediate
// (as tested by Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value representable as a packed FP immediate
// (as tested by Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4873 
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4904 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its sibling operands this one does not specify
// op_cost(0), so it takes the default op_cost(1) from the op_attrib --
// confirm whether that asymmetry is intended.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only (the original comment said "R2" -- copy/paste slip)
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5131 
5132 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special (the original comment said
// "Integer 64 bit Register" -- copy/paste slip)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5165 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand (VecX ideal type)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V0 only (fixed-register operand)
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only (fixed-register operand)
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only (fixed-register operand)
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only (fixed-register operand)
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5235 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5275 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (the original
  // trailing comment said "link_reg" -- copy/paste slip)
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5317 
//----------Memory Operands----------------------------------------------------

// Simple indirect [reg] addressing.
// In the MEMORY_INTER descriptions below, index(0xffffffff) is the
// sentinel meaning "no index register".
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + (long index << scale) + 12-bit unsigned int offset]
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + (long index << scale) + 12-bit unsigned long offset]
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + sign-extended int index + offset]
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// [base + (sign-extended int index << scale) + offset]
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + (sign-extended int index << scale)]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index << scale)]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + long index]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + int offset]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base + long offset]
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5459 
5460 
// Narrow-oop addressing variants: the base register holds a compressed
// oop (DecodeN); these forms are only legal when the narrow oop shift
// is zero, as enforced by each predicate below.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base + (long index << scale) + int offset]
// NOTE(review): this op_cost(0) differs from the INSN_COST used by its
// L-offset sibling below -- confirm whether that asymmetry is intended.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [narrow base + (long index << scale) + long offset]
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [narrow base + sign-extended int index + offset]
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// [narrow base + (sign-extended int index << scale) + offset]
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}
5535 
// Narrow base plus scaled, sign-extended int index (no offset).
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus scaled long index (no offset).
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base plus unscaled long index.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base plus immediate int offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base plus immediate long offset.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5610 
5611 
5612 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Restricted to the thread register base plus the fixed pc-slot offset
// (immL_pc_off), so it can only address that one field.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5627 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP (0x1e is the stack-pointer register encoding)
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5646 
// Stack slot for int values; as stackSlotP but without an op_cost.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot for float values.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot for double values.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot for long values.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5702 
5703 // Operands for expressing Control Flow
5704 // NOTE: Label is a predefined operand which should not be redefined in
5705 //       the AD file. It is generically handled within the ADLC.
5706 
5707 //----------Conditional Branch Operands----------------------------------------
5708 // Comparison Op  - This is the operation of the comparison, and is limited to
5709 //                  the following set of codes:
5710 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5711 //
5712 // Other attributes of the comparison, such as unsignedness, are specified
5713 // by the comparison instruction that sets a condition code flags register.
5714 // That result is represented by a flags operand whose subtype is appropriate
5715 // to the unsignedness (etc.) of the comparison.
5716 //
5717 // Later, the instruction which matches both the Comparison Op (a Bool) and
5718 // the flags (produced by the Cmp) specifies the coding of the comparison op
5719 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5720 
// used for signed integral comparisons and fp comparisons
// The hex values are the AArch64 condition-code field encodings for the
// named mnemonics (eq/ne/lt/ge/le/gt/vs/vc).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5739 
// used for unsigned integral comparisons
// Same encoding scheme as cmpOp, but mapping the relational operators to
// the unsigned condition mnemonics (lo/hs/ls/hi).

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5758 
// Special operand allowing long args to int ops to be truncated for free
// Matches a ConvL2I of a long register as a plain register operand, so the
// truncation costs no instruction (32-bit ops read the low word anyway).

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
5771 
// vmem groups the addressing modes usable by vector load/store rules.
opclass vmem(indirect, indIndex, indOffI, indOffL);
5773 
5774 //----------OPERAND CLASSES----------------------------------------------------
5775 // Operand Classes are groups of operands that are used as to simplify
5776 // instruction definitions by not requiring the AD writer to specify
5777 // separate instructions for every form of operand when the
5778 // instruction accepts multiple operand types with the same basic
5779 // encoding and format. The classic case of this is memory operands.
5780 
// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
// First line: plain-pointer modes; second line: the matching
// narrow-oop (DecodeN-folding) modes in the same order.

opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
5786 
5787 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.  (See the iRegL2I operand definition above.)
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5802 
5803 //----------PIPELINE-----------------------------------------------------------
5804 // Rules which define the behavior of the target architectures pipeline.
5805 // Integer ALU reg operation
5806 pipeline %{
5807 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5820 
5821 // We don't use an actual pipeline model so don't care about resources
5822 // or description. we do use pipeline classes to introduce fixed
5823 // latencies
5824 
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
// INS01 is shorthand for "either issue slot"; ALU for "either ALU".

resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline
// Issue, two execute stages, then write-back.

pipe_desc(ISS, EX1, EX2, WR);
5840 
5841 //----------PIPELINE CLASSES---------------------------------------------------
5842 // Pipeline Classes describe the stages in which input and output are
5843 // referenced by the hardware pipeline.
5844 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1; // NOTE(review): header comment says result in EX2 but the
                // ALU is booked in EX1 — confirm which stage is intended.
%}
5910 
// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
5942 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
5969 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6007 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6060 
//------- Divide pipeline operations --------------------

// 32 bit divide
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64 bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6086 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-addressed load)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6120 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// Here "dst" is the address register (read in ISS), "src" the stored value.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
6154 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
6183 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
6207 
// Empty pipeline class (zero-latency pseudo ops, eg. nops)
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
6242 
// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
6247 
6248 %}
6249 //----------INSTRUCTIONS-------------------------------------------------------
6250 //
6251 // match      -- States which machine-independent subtree may be replaced
6252 //               by this instruction.
6253 // ins_cost   -- The estimated cost of this instruction is used by instruction
6254 //               selection to identify a minimum cost tree of machine
6255 //               instructions that matches a tree of machine-independent
6256 //               instructions.
6257 // format     -- A string providing the disassembly for this instruction.
6258 //               The value of an instruction's operand may be inserted
6259 //               by referring to it with a '$' prefix.
6260 // opcode     -- Three instruction opcodes may be provided.  These are referred
6261 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6263 //               indicate the type of machine instruction, while secondary
6264 //               and tertiary are often used for prefix options or addressing
6265 //               modes.
6266 // ins_encode -- A list of encode classes with parameters. The encode class
6267 //               name must have been defined in an 'enc_class' specification
6268 //               in the encode section of the architecture description.
6269 
6270 // ============================================================================
6271 // Memory (Load/Store) Instructions
6272 
6273 // Load Instructions
6274 
6275 // Load Byte (8 bit signed)
6276 instruct loadB(iRegINoSp dst, memory mem)
6277 %{
6278   match(Set dst (LoadB mem));
6279   predicate(!needs_acquiring_load(n));
6280 
6281   ins_cost(4 * INSN_COST);
6282   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6283 
6284   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6285 
6286   ins_pipe(iload_reg_mem);
6287 %}
6288 
6289 // Load Byte (8 bit signed) into long
6290 instruct loadB2L(iRegLNoSp dst, memory mem)
6291 %{
6292   match(Set dst (ConvI2L (LoadB mem)));
6293   predicate(!needs_acquiring_load(n->in(1)));
6294 
6295   ins_cost(4 * INSN_COST);
6296   format %{ "ldrsb  $dst, $mem\t# byte" %}
6297 
6298   ins_encode(aarch64_enc_ldrsb(dst, mem));
6299 
6300   ins_pipe(iload_reg_mem);
6301 %}
6302 
6303 // Load Byte (8 bit unsigned)
6304 instruct loadUB(iRegINoSp dst, memory mem)
6305 %{
6306   match(Set dst (LoadUB mem));
6307   predicate(!needs_acquiring_load(n));
6308 
6309   ins_cost(4 * INSN_COST);
6310   format %{ "ldrbw  $dst, $mem\t# byte" %}
6311 
6312   ins_encode(aarch64_enc_ldrb(dst, mem));
6313 
6314   ins_pipe(iload_reg_mem);
6315 %}
6316 
6317 // Load Byte (8 bit unsigned) into long
6318 instruct loadUB2L(iRegLNoSp dst, memory mem)
6319 %{
6320   match(Set dst (ConvI2L (LoadUB mem)));
6321   predicate(!needs_acquiring_load(n->in(1)));
6322 
6323   ins_cost(4 * INSN_COST);
6324   format %{ "ldrb  $dst, $mem\t# byte" %}
6325 
6326   ins_encode(aarch64_enc_ldrb(dst, mem));
6327 
6328   ins_pipe(iload_reg_mem);
6329 %}
6330 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned), zero-extending ldrh
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6386 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long (sign-extending ldrsw)
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 32-bit mask is absorbed because ldrw zero-extends.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6428 
// Load Long (64 bit signed)
// Plain (non-acquiring) 64-bit load; volatile loads are matched elsewhere.
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed: disassembly annotation said "# int" (copied from loadI) for a
  // 64-bit load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6442 
// Load Range (array length); no acquiring-load predicate needed since a
// LoadRange is never volatile.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6455 
// Load Pointer (64 bit)
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer (32 bit narrow oop)
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6483 
// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer (32 bit compressed class pointer)
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6511 
// Load Float (32 bit FP register)
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double (64 bit FP register)
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
6539 
6540 
// Load Int Constant (materialize immediate into a 32-bit register)
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant (materialize immediate into a 64-bit register)
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
6566 
6567 // Load Pointer Constant
6568 
6569 instruct loadConP(iRegPNoSp dst, immP con)
6570 %{
6571   match(Set dst con);
6572 
6573   ins_cost(INSN_COST * 4);
6574   format %{
6575     "mov  $dst, $con\t# ptr\n\t"
6576   %}
6577 
6578   ins_encode(aarch64_enc_mov_p(dst, con));
6579 
6580   ins_pipe(ialu_imm);
6581 %}
6582 
6583 // Load Null Pointer Constant
6584 
6585 instruct loadConP0(iRegPNoSp dst, immP0 con)
6586 %{
6587   match(Set dst con);
6588 
6589   ins_cost(INSN_COST);
6590   format %{ "mov  $dst, $con\t# NULL ptr" %}
6591 
6592   ins_encode(aarch64_enc_mov_p0(dst, con));
6593 
6594   ins_pipe(ialu_imm);
6595 %}
6596 
// Load Pointer Constant One
// Materializes the pointer constant 1 (immP_1).

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed copy-paste from loadConP0: this rule loads the constant 1,
  // not NULL (the encoding is aarch64_enc_mov_p1).
  format %{ "mov  $dst, $con\t# one ptr" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6610 
// Load Poll Page Constant

// The poll page address is materialized with a single pc-relative adr
// (see the "adr" mnemonic in the format), hence plain INSN_COST.
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

// Card-table byte map base, also reachable via a single adr.
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
6680 
// Load Packed Float Constant

// immFPacked constants can be encoded directly in an fmov immediate.
// NOTE(review): the fmovs call casts the constant to double — presumably
// required by the assembler's fmovs(FloatRegister, double) signature;
// confirm against the MacroAssembler declaration.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}

// Load Float Constant

// General float constants are loaded from the constant table via a
// pc-relative ldrs.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}
6724 
// Load Double Constant

// General double constants are loaded from the constant table via a
// pc-relative ldrd.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    // fixed copy-paste from loadConF: this rule loads a double
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}
6741 
// Store Instructions

// Store CMS card-mark Immediate
// NOTE(review): unlike the other plain stores below, this rule carries no
// !needs_releasing_store(n) predicate — presumably card-mark stores never
// need release semantics; confirm against the barrier code.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
// The !needs_releasing_store(n) predicate on the plain store rules in
// this section steers releasing (volatile) stores to the stlr-based
// *_volatile rules later in this file.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}


// Store of immediate zero uses the zero register (zr) directly.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
6839 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // fixed copy-paste from storeI: this is a 64-bit long store
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long immediate zero (64 bit signed), using the zero register
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // fixed copy-paste from storeimmI0: this is a 64-bit long store
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
6867 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store compressed NULL: when both narrow oop and narrow klass bases are
// NULL, rheapbase holds zero, so it can be stored directly (see the
// format comment "rheapbase==0").
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
6924 
// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
// (predicate listed before match here; ordering of these clauses does not
// change the rule's meaning, but differs from the sibling rules above)
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
6986 
//  ---------------- volatile loads and stores ----------------

// These rules take an indirect (register-only) operand rather than the
// general memory operand, and emit acquiring ldar* forms; they complement
// the !needs_acquiring_load(n) predicates on the plain load rules above.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char/Short (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7078 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // fixed format/encoding mismatch: the encoding is the sign-extending
  // ldarsh (aarch64_enc_ldarsh), not the zero-extending ldarh
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7091 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// Matches the (AndL (ConvI2L ...) 0xFFFFFFFF) masking idiom; ldarw's
// 32-bit write already zeroes the upper half, so no mask insn is needed.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7117 
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // fixed copy-paste from loadI_volatile: this is a 64-bit long load
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7130 
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// FP acquiring loads go through a dedicated encoding (aarch64_enc_fldars)
// since ldar targets the integer register file.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7182 
// Store Byte
// Releasing stores use stlr* forms; these rules complement the
// !needs_releasing_store(n) predicates on the plain store rules above.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7222 
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // fixed copy-paste from storeI_volatile: this is a 64-bit long store
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7235 
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// FP releasing stores go through dedicated encodings (aarch64_enc_fstlrs/
// fstlrd) since stlr targets the integer register file.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7290 
7291 //  ---------------- end of volatile loads and stores ----------------
7292 
7293 // ============================================================================
7294 // BSWAP Instructions
7295 
7296 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
7297   match(Set dst (ReverseBytesI src));
7298 
7299   ins_cost(INSN_COST);
7300   format %{ "revw  $dst, $src" %}
7301 
7302   ins_encode %{
7303     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
7304   %}
7305 
7306   ins_pipe(ialu_reg);
7307 %}
7308 
7309 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
7310   match(Set dst (ReverseBytesL src));
7311 
7312   ins_cost(INSN_COST);
7313   format %{ "rev  $dst, $src" %}
7314 
7315   ins_encode %{
7316     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
7317   %}
7318 
7319   ins_pipe(ialu_reg);
7320 %}
7321 
7322 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
7323   match(Set dst (ReverseBytesUS src));
7324 
7325   ins_cost(INSN_COST);
7326   format %{ "rev16w  $dst, $src" %}
7327 
7328   ins_encode %{
7329     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7330   %}
7331 
7332   ins_pipe(ialu_reg);
7333 %}
7334 
7335 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
7336   match(Set dst (ReverseBytesS src));
7337 
7338   ins_cost(INSN_COST);
7339   format %{ "rev16w  $dst, $src\n\t"
7340             "sbfmw $dst, $dst, #0, #15" %}
7341 
7342   ins_encode %{
7343     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
7344     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
7345   %}
7346 
7347   ins_pipe(ialu_reg);
7348 %}
7349 
7350 // ============================================================================
7351 // Zero Count Instructions
7352 
7353 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7354   match(Set dst (CountLeadingZerosI src));
7355 
7356   ins_cost(INSN_COST);
7357   format %{ "clzw  $dst, $src" %}
7358   ins_encode %{
7359     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
7360   %}
7361 
7362   ins_pipe(ialu_reg);
7363 %}
7364 
7365 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
7366   match(Set dst (CountLeadingZerosL src));
7367 
7368   ins_cost(INSN_COST);
7369   format %{ "clz   $dst, $src" %}
7370   ins_encode %{
7371     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
7372   %}
7373 
7374   ins_pipe(ialu_reg);
7375 %}
7376 
7377 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
7378   match(Set dst (CountTrailingZerosI src));
7379 
7380   ins_cost(INSN_COST * 2);
7381   format %{ "rbitw  $dst, $src\n\t"
7382             "clzw   $dst, $dst" %}
7383   ins_encode %{
7384     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
7385     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
7386   %}
7387 
7388   ins_pipe(ialu_reg);
7389 %}
7390 
7391 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
7392   match(Set dst (CountTrailingZerosL src));
7393 
7394   ins_cost(INSN_COST * 2);
7395   format %{ "rbit   $dst, $src\n\t"
7396             "clz    $dst, $dst" %}
7397   ins_encode %{
7398     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
7399     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
7400   %}
7401 
7402   ins_pipe(ialu_reg);
7403 %}
7404 
//---------- Population Count Instructions -------------------------------------
//
// Population count is done via the SIMD cnt (per-byte popcount) and addv
// (horizontal add) instructions, routing the value through an FP/SIMD
// temporary register.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): writes $src in place to zero the upper 32 bits. The
    // int value itself is unchanged, but $src is not declared as modified
    // in effect() — confirm this is safe for the register allocator.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form: load the 32-bit value straight into the SIMD
// register with ldrs, skipping the GPR round trip.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7494 
7495 // ============================================================================
7496 // MemBar Instruction
7497 
7498 instruct load_fence() %{
7499   match(LoadFence);
7500   ins_cost(VOLATILE_REF_COST);
7501 
7502   format %{ "load_fence" %}
7503 
7504   ins_encode %{
7505     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7506   %}
7507   ins_pipe(pipe_serial);
7508 %}
7509 
7510 instruct unnecessary_membar_acquire() %{
7511   predicate(unnecessary_acquire(n));
7512   match(MemBarAcquire);
7513   ins_cost(0);
7514 
7515   format %{ "membar_acquire (elided)" %}
7516 
7517   ins_encode %{
7518     __ block_comment("membar_acquire (elided)");
7519   %}
7520 
7521   ins_pipe(pipe_class_empty);
7522 %}
7523 
7524 instruct membar_acquire() %{
7525   match(MemBarAcquire);
7526   ins_cost(VOLATILE_REF_COST);
7527 
7528   format %{ "membar_acquire" %}
7529 
7530   ins_encode %{
7531     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7532   %}
7533 
7534   ins_pipe(pipe_serial);
7535 %}
7536 
7537 
7538 instruct membar_acquire_lock() %{
7539   match(MemBarAcquireLock);
7540   ins_cost(VOLATILE_REF_COST);
7541 
7542   format %{ "membar_acquire_lock" %}
7543 
7544   ins_encode %{
7545     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
7546   %}
7547 
7548   ins_pipe(pipe_serial);
7549 %}
7550 
7551 instruct store_fence() %{
7552   match(StoreFence);
7553   ins_cost(VOLATILE_REF_COST);
7554 
7555   format %{ "store_fence" %}
7556 
7557   ins_encode %{
7558     __ membar(Assembler::LoadStore|Assembler::StoreStore);
7559   %}
7560   ins_pipe(pipe_serial);
7561 %}
7562 
// Elided release barrier (cf. unnecessary_membar_acquire above).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}

  ins_pipe(pipe_serial);
%}

instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile barrier (StoreLoad); costed very high (x100) so the
// matcher strongly prefers the elided form when it applies.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
7639 
7640 // ============================================================================
7641 // Cast/Convert Instructions
7642 
7643 instruct castX2P(iRegPNoSp dst, iRegL src) %{
7644   match(Set dst (CastX2P src));
7645 
7646   ins_cost(INSN_COST);
7647   format %{ "mov $dst, $src\t# long -> ptr" %}
7648 
7649   ins_encode %{
7650     if ($dst$$reg != $src$$reg) {
7651       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7652     }
7653   %}
7654 
7655   ins_pipe(ialu_reg);
7656 %}
7657 
7658 instruct castP2X(iRegLNoSp dst, iRegP src) %{
7659   match(Set dst (CastP2X src));
7660 
7661   ins_cost(INSN_COST);
7662   format %{ "mov $dst, $src\t# ptr -> long" %}
7663 
7664   ins_encode %{
7665     if ($dst$$reg != $src$$reg) {
7666       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
7667     }
7668   %}
7669 
7670   ins_pipe(ialu_reg);
7671 %}
7672 
7673 // Convert oop into int for vectors alignment masking
7674 instruct convP2I(iRegINoSp dst, iRegP src) %{
7675   match(Set dst (ConvL2I (CastP2X src)));
7676 
7677   ins_cost(INSN_COST);
7678   format %{ "movw $dst, $src\t# ptr -> int" %}
7679   ins_encode %{
7680     __ movw($dst$$Register, $src$$Register);
7681   %}
7682 
7683   ins_pipe(ialu_reg);
7684 %}
7685 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
// With a zero narrow_oop_shift the compressed value equals the low 32
// bits of the decoded pointer, so a plain 32-bit register move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // fixed format: "$" was missing before dst, and the encoding emits
  // movw, not mov (cf. convP2I above)
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7701 
7702 
7703 // Convert oop pointer into compressed form
7704 instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
7705   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
7706   match(Set dst (EncodeP src));
7707   effect(KILL cr);
7708   ins_cost(INSN_COST * 3);
7709   format %{ "encode_heap_oop $dst, $src" %}
7710   ins_encode %{
7711     Register s = $src$$Register;
7712     Register d = $dst$$Register;
7713     __ encode_heap_oop(d, s);
7714   %}
7715   ins_pipe(ialu_reg);
7716 %}
7717 
7718 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
7719   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
7720   match(Set dst (EncodeP src));
7721   ins_cost(INSN_COST * 3);
7722   format %{ "encode_heap_oop_not_null $dst, $src" %}
7723   ins_encode %{
7724     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
7725   %}
7726   ins_pipe(ialu_reg);
7727 %}
7728 
7729 instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
7730   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
7731             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
7732   match(Set dst (DecodeN src));
7733   ins_cost(INSN_COST * 3);
7734   format %{ "decode_heap_oop $dst, $src" %}
7735   ins_encode %{
7736     Register s = $src$$Register;
7737     Register d = $dst$$Register;
7738     __ decode_heap_oop(d, s);
7739   %}
7740   ins_pipe(ialu_reg);
7741 %}
7742 
7743 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
7744   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
7745             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
7746   match(Set dst (DecodeN src));
7747   ins_cost(INSN_COST * 3);
7748   format %{ "decode_heap_oop_not_null $dst, $src" %}
7749   ins_encode %{
7750     Register s = $src$$Register;
7751     Register d = $dst$$Register;
7752     __ decode_heap_oop_not_null(d, s);
7753   %}
7754   ins_pipe(ialu_reg);
7755 %}
7756 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (always non-NULL by construction).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (always non-NULL).
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The two-register form requires distinct registers; when dst and
    // src are allocated to the same register, fall back to the
    // single-register in-place variant.
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
7794 
// CheckCastPP is a type assertion only -- it narrows the compiler's view
// of the pointer's type and emits no machine code (size 0).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP likewise carries only type information; no code is emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII pins an int value's range information; zero-size, zero-cost.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
7825 
7826 // ============================================================================
7827 // Atomic operation instructions
7828 //
7829 // Intel and SPARC both implement Ideal Node LoadPLocked and
7830 // Store{PIL}Conditional instructions using a normal load for the
7831 // LoadPLocked and a CAS for the Store{PIL}Conditional.
7832 //
7833 // The ideal code appears only to use LoadPLocked/StorePLocked as a
7834 // pair to lock object allocations from Eden space when not using
7835 // TLABs.
7836 //
7837 // There does not appear to be a Load{IL}Locked Ideal Node and the
7838 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
7839 // and to use StoreIConditional only for 32-bit and StoreLConditional
7840 // only for 64-bit.
7841 //
7842 // We implement LoadPLocked and StorePLocked instructions using,
7843 // respectively the AArch64 hw load-exclusive and store-conditional
7844 // instructions. Whereas we must implement each of
7845 // Store{IL}Conditional using a CAS which employs a pair of
7846 // instructions comprising a load-exclusive followed by a
7847 // store-conditional.
7848 
7849 
// Locked-load (linked load) of the current heap-top
// used when updating the eden heap top
// implemented using ldaxr on AArch64
// The load-exclusive arms the monitor for the matching stlxr in
// storePConditional below.

instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
7866 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// Pairs with loadPLocked above: the stlxr only succeeds if the
// exclusive monitor armed by the ldaxr is still held.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}

// this has to be implemented as a CAS
// (see the block comment above: Store{IL}Conditional is used by the
// ideal graph as an alias for CAS, not as the second half of a
// load-linked pair).
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// this has to be implemented as a CAS
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
7925 
// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// CAS int: res <- 1 on success, 0 on failure.  The embedded compare
// clobbers the flags, hence KILL cr; cset materializes the boolean
// result from the EQ condition.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS long: same shape as the int rule but using the 64-bit cmpxchg.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS pointer: 64-bit exchange of a full oop/klass pointer.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS narrow oop: compressed oops are 32 bits, so the word-sized
// cmpxchgw is used.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
7996 
7997 
// Atomic exchange, int: prev <- *mem; *mem <- newv.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, long (64-bit).
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, narrow oop: 32-bit wide, result lands in an int reg.
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, pointer (64-bit).
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8033 
8034 
// Atomic fetch-and-add, long: newval <- old *mem; *mem <- old + incr.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Result-discarding variant (slightly cheaper): selected when the ideal
// node's result is unused; noreg tells atomic_add not to produce it.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment variant (increment fits an AddSub immediate).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic fetch-and-add, int (word-sized atomic_addw).
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add with immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add with immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8118 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    // dst = (src1 != src2) ? 1 : 0, then negate when src1 < src2,
    // yielding the required -1/0/+1.
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8141 
// Manifest a CmpL result against an AddSub-encodable immediate.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // AddSub immediates must be non-negative, so compare with a negative
    // constant by adding its magnitude instead of subtracting it.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // dst = (src1 != con) ? 1 : 0, then negate when src1 < con,
    // yielding the required -1/0/+1.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8166 
8167 // ============================================================================
8168 // Conditional Move Instructions
8169 
8170 // n.b. we have identical rules for both a signed compare op (cmpOp)
8171 // and an unsigned compare op (cmpOpU). it would be nice if we could
8172 // define an op class which merged both inputs and use it to type the
// argument to a single rule. Unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
8178 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
8179 
// CMoveI, signed compare: dst = cond ? src2 : src1.  Note the operand
// order passed to cselw -- src2 is selected when the condition holds.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveI, unsigned compare variant (see the block comment above for why
// cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Zero on the left: dst = cond ? src : 0, using zr in place of a
// register holding 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, unsigned compare.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right: dst = cond ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, unsigned compare.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8284 
// special case for creating a boolean 0 or 1

// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register

// csincw dst, zr, zr, cond produces 0 when cond holds and zr+1 == 1
// otherwise, i.e. dst = cond ? 0 : 1 -- matching CMove(one, zero).
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// As above, unsigned compare.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8327 
// CMoveL, signed compare: dst = cond ? src2 : src1 (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveL, unsigned compare variant.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Zero on the right: dst = cond ? 0 : src, using zr directly.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, unsigned compare.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left: dst = cond ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, unsigned compare.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8425 
// CMoveP, signed compare: dst = cond ? src2 : src1 (64-bit csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveP, unsigned compare variant.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Null on the right: dst = cond ? NULL : src, using zr directly.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, unsigned compare.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null on the left: dst = cond ? src : NULL.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, unsigned compare.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8523 
// CMoveN, signed compare: narrow oops are 32 bits, so cselw is used.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8539 
// CMoveN, unsigned compare variant (cmpOpU / rFlagsRegU).
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // Fixed: the format comment previously said "signed" although this is
  // the unsigned-compare rule.
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8555 
// special cases where one arg is zero

// Zero on the right: dst = cond ? 0 : src, using zr directly.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, unsigned compare.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left: dst = cond ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// As above, unsigned compare.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8621 
// CMoveF, signed compare: FP conditional select (fcsels); note src2 is
// selected when the condition holds, mirroring the integer cmov rules.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}

// CMoveF, unsigned compare variant.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8657 
// CMoveD, signed compare: double-precision conditional select (fcseld).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed: the format comment previously said "cmove float" for this
  // double rule.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}

// CMoveD, unsigned compare variant.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // Fixed: "cmove float" -> "cmove double" here too.
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8693 
8694 // ============================================================================
8695 // Arithmetic Instructions
8696 //
8697 
8698 // Integer Addition
8699 
8700 // TODO
8701 // these currently employ operations which do not set CR and hence are
8702 // not flagged as killing CR but we would like to isolate the cases
8703 // where we want to set flags from those where we don't. need to work
8704 // out how to do that.
8705 
// 32-bit integer add, register + register.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8720 
// 32-bit integer add, register + add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8734 
// 32-bit add of the low word of a long register and an immediate,
// folding the ConvL2I into the addw (which only reads the low 32 bits).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8748 
8749 // Pointer Addition
// Pointer add: pointer base + 64-bit register offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8764 
// Pointer add with int offset, folding the ConvI2L into the add's
// sxtw (sign-extend word) operand extension.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
8779 
// Pointer add of a scaled long index: base + (src2 << scale), emitted as
// an lea with a shifted-register address form.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
8794 
// Pointer add of a scaled int index: base + ((long)src2 << scale), folding
// the ConvI2L into the address's sxtw extension.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
8809 
// (long)src << scale implemented as a single sbfiz (sign-extend then shift).
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    // The width is capped at 32 — presumably because the source is a 32-bit
    // value being sign-extended. NOTE(review): the format string shows the
    // uncapped expression; confirm and update it to mention the MIN cap.
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
8824 
8825 // Pointer Immediate Addition
8826 // n.b. this needs to be more expensive than using an indirect memory
8827 // operand
// Pointer add of an add/sub-encodable immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8841 
8842 // Long Addition
// 64-bit long add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8858 
// No constant pool entries required. Long Immediate Addition.
// 64-bit long add, register + add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8873 
8874 // Integer Subtraction
// 32-bit integer subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8889 
8890 // Immediate Subtraction
// 32-bit integer subtract, register - add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8904 
8905 // Long Subtraction
// 64-bit long subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8921 
// No constant pool entries required. Long Immediate Subtraction.
// 64-bit long subtract, register - add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // n.b. previously read "sub$dst" — missing separator after the mnemonic.
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8936 
8937 // Integer Negation (special case for sub)
8938 
// 32-bit integer negate: matches 0 - src.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8952 
8953 // Long Negation
8954 
// 64-bit long negate: matches 0 - src.
// NOTE(review): src is declared iRegIorL2I here while the operation is a
// 64-bit SubL — the sibling long rules use iRegL; confirm this operand
// class is intentional.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8968 
8969 // Integer Multiply
8970 
// 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
8985 
// Widening 32x32 -> 64-bit signed multiply: folds the two ConvI2L
// conversions into a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9000 
9001 // Long Multiply
9002 
// 64-bit long multiply.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9017 
// High 64 bits of a signed 64x64-bit multiply (MulHiL), via smulh.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // n.b. the format previously had a stray ", " before the comment tab.
  format %{ "smulh  $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9033 
9034 // Combined Integer Multiply & Add/Sub
9035 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format shows the word-form mnemonic actually emitted (maddw, not madd).
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9051 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Format shows the word-form mnemonic actually emitted (msubw, not msub).
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
9067 
9068 // Combined Long Multiply & Add/Sub
9069 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9085 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9101 
9102 // Integer Divide
9103 
// 32-bit signed integer divide.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9113 
// (src1 >> 31) >>> 31 collapses to a single logical shift right by 31,
// i.e. extracting the sign bit of a 32-bit value.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
9123 
// src + sign-bit(src): the rounding adjustment used before an arithmetic
// shift to implement division by a power of two; emitted as a single
// shifted-operand addw.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
9137 
9138 // Long Divide
9139 
// 64-bit signed long divide.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9149 
// (src1 >> 63) >>> 63 collapses to a single logical shift right by 63,
// i.e. extracting the sign bit of a 64-bit value.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
9159 
// src + sign-bit(src): the long-sized rounding adjustment used before an
// arithmetic shift to implement division by a power of two; emitted as a
// single shifted-operand add.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // n.b. the format previously omitted the LSR shift that the encoding
  // applies (cf. the int rule div2Round).
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
9173 
9174 // Integer Remainder
9175 
// 32-bit integer remainder: rscratch1 = src1 / src2, then
// dst = src1 - rscratch1 * src2 via msubw.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // n.b. the format previously had an unbalanced "(" after msubw.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9186 
9187 // Long Remainder
9188 
// 64-bit long remainder: rscratch1 = src1 / src2, then
// dst = src1 - rscratch1 * src2 via msub.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // n.b. the format previously had an unbalanced "(" after msub and was
  // missing the "\t" continuation used by the int rule modI.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9199 
9200 // Integer Shifts
9201 
9202 // Shift Left Register
// 32-bit shift left, variable shift amount in a register.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9217 
9218 // Shift Left Immediate
// 32-bit shift left by immediate; the count is masked to 0..31 to match
// Java shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9233 
9234 // Shift Right Logical Register
// 32-bit logical shift right, variable shift amount in a register.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9249 
9250 // Shift Right Logical Immediate
// 32-bit logical shift right by immediate; count masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9265 
9266 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right, variable shift amount in a register.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9281 
9282 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by immediate; count masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9297 
9298 // Combined Int Mask and Right Shift (using UBFM)
9299 // TODO
9300 
9301 // Long Shifts
9302 
9303 // Shift Left Register
// 64-bit shift left, variable shift amount in a register.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9318 
9319 // Shift Left Immediate
// 64-bit shift left by immediate; count masked to 0..63 to match Java
// long shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9334 
9335 // Shift Right Logical Register
// 64-bit logical shift right, variable shift amount in a register.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9350 
9351 // Shift Right Logical Immediate
// 64-bit logical shift right by immediate; count masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9366 
9367 // A special-case pattern for card table stores.
// A special-case pattern for card table stores: logical shift right of a
// pointer reinterpreted as a long (CastP2X); count masked to 0..63.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9382 
9383 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right, variable shift amount in a register.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9398 
9399 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by immediate; count masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9414 
9415 // BEGIN This section of the file is automatically generated. Do not edit --------------
9416 
9417 instruct regL_not_reg(iRegLNoSp dst,
9418                          iRegL src1, immL_M1 m1,
9419                          rFlagsReg cr) %{
9420   match(Set dst (XorL src1 m1));
9421   ins_cost(INSN_COST);
9422   format %{ "eon  $dst, $src1, zr" %}
9423 
9424   ins_encode %{
9425     __ eon(as_Register($dst$$reg),
9426               as_Register($src1$$reg),
9427               zr,
9428               Assembler::LSL, 0);
9429   %}
9430 
9431   ins_pipe(ialu_reg);
9432 %}
9433 instruct regI_not_reg(iRegINoSp dst,
9434                          iRegIorL2I src1, immI_M1 m1,
9435                          rFlagsReg cr) %{
9436   match(Set dst (XorI src1 m1));
9437   ins_cost(INSN_COST);
9438   format %{ "eonw  $dst, $src1, zr" %}
9439 
9440   ins_encode %{
9441     __ eonw(as_Register($dst$$reg),
9442               as_Register($src1$$reg),
9443               zr,
9444               Assembler::LSL, 0);
9445   %}
9446 
9447   ins_pipe(ialu_reg);
9448 %}
9449 
9450 instruct AndI_reg_not_reg(iRegINoSp dst,
9451                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
9452                          rFlagsReg cr) %{
9453   match(Set dst (AndI src1 (XorI src2 m1)));
9454   ins_cost(INSN_COST);
9455   format %{ "bicw  $dst, $src1, $src2" %}
9456 
9457   ins_encode %{
9458     __ bicw(as_Register($dst$$reg),
9459               as_Register($src1$$reg),
9460               as_Register($src2$$reg),
9461               Assembler::LSL, 0);
9462   %}
9463 
9464   ins_pipe(ialu_reg_reg);
9465 %}
9466 
9467 instruct AndL_reg_not_reg(iRegLNoSp dst,
9468                          iRegL src1, iRegL src2, immL_M1 m1,
9469                          rFlagsReg cr) %{
9470   match(Set dst (AndL src1 (XorL src2 m1)));
9471   ins_cost(INSN_COST);
9472   format %{ "bic  $dst, $src1, $src2" %}
9473 
9474   ins_encode %{
9475     __ bic(as_Register($dst$$reg),
9476               as_Register($src1$$reg),
9477               as_Register($src2$$reg),
9478               Assembler::LSL, 0);
9479   %}
9480 
9481   ins_pipe(ialu_reg_reg);
9482 %}
9483 
9484 instruct OrI_reg_not_reg(iRegINoSp dst,
9485                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
9486                          rFlagsReg cr) %{
9487   match(Set dst (OrI src1 (XorI src2 m1)));
9488   ins_cost(INSN_COST);
9489   format %{ "ornw  $dst, $src1, $src2" %}
9490 
9491   ins_encode %{
9492     __ ornw(as_Register($dst$$reg),
9493               as_Register($src1$$reg),
9494               as_Register($src2$$reg),
9495               Assembler::LSL, 0);
9496   %}
9497 
9498   ins_pipe(ialu_reg_reg);
9499 %}
9500 
9501 instruct OrL_reg_not_reg(iRegLNoSp dst,
9502                          iRegL src1, iRegL src2, immL_M1 m1,
9503                          rFlagsReg cr) %{
9504   match(Set dst (OrL src1 (XorL src2 m1)));
9505   ins_cost(INSN_COST);
9506   format %{ "orn  $dst, $src1, $src2" %}
9507 
9508   ins_encode %{
9509     __ orn(as_Register($dst$$reg),
9510               as_Register($src1$$reg),
9511               as_Register($src2$$reg),
9512               Assembler::LSL, 0);
9513   %}
9514 
9515   ins_pipe(ialu_reg_reg);
9516 %}
9517 
9518 instruct XorI_reg_not_reg(iRegINoSp dst,
9519                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
9520                          rFlagsReg cr) %{
9521   match(Set dst (XorI m1 (XorI src2 src1)));
9522   ins_cost(INSN_COST);
9523   format %{ "eonw  $dst, $src1, $src2" %}
9524 
9525   ins_encode %{
9526     __ eonw(as_Register($dst$$reg),
9527               as_Register($src1$$reg),
9528               as_Register($src2$$reg),
9529               Assembler::LSL, 0);
9530   %}
9531 
9532   ins_pipe(ialu_reg_reg);
9533 %}
9534 
9535 instruct XorL_reg_not_reg(iRegLNoSp dst,
9536                          iRegL src1, iRegL src2, immL_M1 m1,
9537                          rFlagsReg cr) %{
9538   match(Set dst (XorL m1 (XorL src2 src1)));
9539   ins_cost(INSN_COST);
9540   format %{ "eon  $dst, $src1, $src2" %}
9541 
9542   ins_encode %{
9543     __ eon(as_Register($dst$$reg),
9544               as_Register($src1$$reg),
9545               as_Register($src2$$reg),
9546               Assembler::LSL, 0);
9547   %}
9548 
9549   ins_pipe(ialu_reg_reg);
9550 %}
9551 
9552 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
9553                          iRegIorL2I src1, iRegIorL2I src2,
9554                          immI src3, immI_M1 src4, rFlagsReg cr) %{
9555   match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
9556   ins_cost(1.9 * INSN_COST);
9557   format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
9558 
9559   ins_encode %{
9560     __ bicw(as_Register($dst$$reg),
9561               as_Register($src1$$reg),
9562               as_Register($src2$$reg),
9563               Assembler::LSR,
9564               $src3$$constant & 0x3f);
9565   %}
9566 
9567   ins_pipe(ialu_reg_reg_shift);
9568 %}
9569 
9570 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
9571                          iRegL src1, iRegL src2,
9572                          immI src3, immL_M1 src4, rFlagsReg cr) %{
9573   match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
9574   ins_cost(1.9 * INSN_COST);
9575   format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
9576 
9577   ins_encode %{
9578     __ bic(as_Register($dst$$reg),
9579               as_Register($src1$$reg),
9580               as_Register($src2$$reg),
9581               Assembler::LSR,
9582               $src3$$constant & 0x3f);
9583   %}
9584 
9585   ins_pipe(ialu_reg_reg_shift);
9586 %}
9587 
9588 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
9589                          iRegIorL2I src1, iRegIorL2I src2,
9590                          immI src3, immI_M1 src4, rFlagsReg cr) %{
9591   match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
9592   ins_cost(1.9 * INSN_COST);
9593   format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
9594 
9595   ins_encode %{
9596     __ bicw(as_Register($dst$$reg),
9597               as_Register($src1$$reg),
9598               as_Register($src2$$reg),
9599               Assembler::ASR,
9600               $src3$$constant & 0x3f);
9601   %}
9602 
9603   ins_pipe(ialu_reg_reg_shift);
9604 %}
9605 
9606 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
9607                          iRegL src1, iRegL src2,
9608                          immI src3, immL_M1 src4, rFlagsReg cr) %{
9609   match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
9610   ins_cost(1.9 * INSN_COST);
9611   format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
9612 
9613   ins_encode %{
9614     __ bic(as_Register($dst$$reg),
9615               as_Register($src1$$reg),
9616               as_Register($src2$$reg),
9617               Assembler::ASR,
9618               $src3$$constant & 0x3f);
9619   %}
9620 
9621   ins_pipe(ialu_reg_reg_shift);
9622 %}
9623 
9624 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
9625                          iRegIorL2I src1, iRegIorL2I src2,
9626                          immI src3, immI_M1 src4, rFlagsReg cr) %{
9627   match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
9628   ins_cost(1.9 * INSN_COST);
9629   format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
9630 
9631   ins_encode %{
9632     __ bicw(as_Register($dst$$reg),
9633               as_Register($src1$$reg),
9634               as_Register($src2$$reg),
9635               Assembler::LSL,
9636               $src3$$constant & 0x3f);
9637   %}
9638 
9639   ins_pipe(ialu_reg_reg_shift);
9640 %}
9641 
9642 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
9643                          iRegL src1, iRegL src2,
9644                          immI src3, immL_M1 src4, rFlagsReg cr) %{
9645   match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
9646   ins_cost(1.9 * INSN_COST);
9647   format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
9648 
9649   ins_encode %{
9650     __ bic(as_Register($dst$$reg),
9651               as_Register($src1$$reg),
9652               as_Register($src2$$reg),
9653               Assembler::LSL,
9654               $src3$$constant & 0x3f);
9655   %}
9656 
9657   ins_pipe(ialu_reg_reg_shift);
9658 %}
9659 
9660 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
9661                          iRegIorL2I src1, iRegIorL2I src2,
9662                          immI src3, immI_M1 src4, rFlagsReg cr) %{
9663   match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
9664   ins_cost(1.9 * INSN_COST);
9665   format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
9666 
9667   ins_encode %{
9668     __ eonw(as_Register($dst$$reg),
9669               as_Register($src1$$reg),
9670               as_Register($src2$$reg),
9671               Assembler::LSR,
9672               $src3$$constant & 0x3f);
9673   %}
9674 
9675   ins_pipe(ialu_reg_reg_shift);
9676 %}
9677 
9678 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
9679                          iRegL src1, iRegL src2,
9680                          immI src3, immL_M1 src4, rFlagsReg cr) %{
9681   match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
9682   ins_cost(1.9 * INSN_COST);
9683   format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
9684 
9685   ins_encode %{
9686     __ eon(as_Register($dst$$reg),
9687               as_Register($src1$$reg),
9688               as_Register($src2$$reg),
9689               Assembler::LSR,
9690               $src3$$constant & 0x3f);
9691   %}
9692 
9693   ins_pipe(ialu_reg_reg_shift);
9694 %}
9695 
// dst = src1 ^ ~(src2 >> src3): src4 is the all-ones constant (immI_M1)
// and (x ^ -1) == ~x, so the tree folds to a single EONW (XOR-NOT) with
// an ASR shifted-register operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 ^ ~(src2 >> src3) via EON with an ASR
// shifted-register operand (shift amounts 0..63).
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9731 
// dst = src1 ^ ~(src2 << src3): src4 is the all-ones constant (immI_M1),
// so the tree folds to a single EONW with an LSL shifted-register operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 ^ ~(src2 << src3) via EON with an LSL
// shifted-register operand (shift amounts 0..63).
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9767 
// dst = src1 | ~(src2 >>> src3): src4 is the all-ones constant (immI_M1),
// so XOR with it is bitwise NOT and the tree folds to a single ORNW
// (OR-NOT) with an LSR shifted-register operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 | ~(src2 >>> src3) via ORN with an LSR
// shifted-register operand (shift amounts 0..63).
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9803 
// dst = src1 | ~(src2 >> src3): src4 is the all-ones constant (immI_M1),
// so the tree folds to a single ORNW with an ASR shifted-register operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 | ~(src2 >> src3) via ORN with an ASR
// shifted-register operand (shift amounts 0..63).
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9839 
// dst = src1 | ~(src2 << src3): src4 is the all-ones constant (immI_M1),
// so the tree folds to a single ORNW with an LSL shifted-register operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 | ~(src2 << src3) via ORN with an LSL
// shifted-register operand (shift amounts 0..63).
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9875 
// dst = src1 & (src2 >>> src3): the unsigned right shift is folded into
// ANDW's shifted-register operand (no separate shift instruction needed).
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 & (src2 >>> src3) with the shift folded into
// AND's shifted-register operand (shift amounts 0..63).
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9913 
// dst = src1 & (src2 >> src3): arithmetic right shift folded into ANDW's
// shifted-register operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 & (src2 >> src3) via AND with an ASR
// shifted-register operand (shift amounts 0..63).
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9951 
// dst = src1 & (src2 << src3): left shift folded into ANDW's
// shifted-register operand.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 & (src2 << src3) via AND with an LSL
// shifted-register operand (shift amounts 0..63).
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9989 
// dst = src1 ^ (src2 >>> src3): unsigned right shift folded into EORW's
// shifted-register operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 ^ (src2 >>> src3) via EOR with an LSR
// shifted-register operand (shift amounts 0..63).
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10027 
// dst = src1 ^ (src2 >> src3): arithmetic right shift folded into EORW's
// shifted-register operand.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 ^ (src2 >> src3) via EOR with an ASR
// shifted-register operand (shift amounts 0..63).
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10065 
// dst = src1 ^ (src2 << src3): left shift folded into EORW's
// shifted-register operand.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 ^ (src2 << src3) via EOR with an LSL
// shifted-register operand (shift amounts 0..63).
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10103 
// dst = src1 | (src2 >>> src3): unsigned right shift folded into ORRW's
// shifted-register operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 | (src2 >>> src3) via ORR with an LSR
// shifted-register operand (shift amounts 0..63).
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10141 
// dst = src1 | (src2 >> src3): arithmetic right shift folded into ORRW's
// shifted-register operand.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 | (src2 >> src3) via ORR with an ASR
// shifted-register operand (shift amounts 0..63).
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10179 
// dst = src1 | (src2 << src3): left shift folded into ORRW's
// shifted-register operand.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 | (src2 << src3) via ORR with an LSL
// shifted-register operand (shift amounts 0..63).
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10217 
// dst = src1 + (src2 >>> src3): unsigned right shift folded into ADDW's
// shifted-register operand.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 + (src2 >>> src3) via ADD with an LSR
// shifted-register operand (shift amounts 0..63).
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10255 
// dst = src1 + (src2 >> src3): arithmetic right shift folded into ADDW's
// shifted-register operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 + (src2 >> src3) via ADD with an ASR
// shifted-register operand (shift amounts 0..63).
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10293 
// dst = src1 + (src2 << src3): left shift folded into ADDW's
// shifted-register operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 + (src2 << src3) via ADD with an LSL
// shifted-register operand (shift amounts 0..63).
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10331 
// dst = src1 - (src2 >>> src3): unsigned right shift folded into SUBW's
// shifted-register operand.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 - (src2 >>> src3) via SUB with an LSR
// shifted-register operand (shift amounts 0..63).
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10369 
// dst = src1 - (src2 >> src3): arithmetic right shift folded into SUBW's
// shifted-register operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 - (src2 >> src3) via SUB with an ASR
// shifted-register operand (shift amounts 0..63).
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10407 
// dst = src1 - (src2 << src3): left shift folded into SUBW's
// shifted-register operand.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // Word form: only shifts 0..31 are encodable; mask with 0x1f
              // (0x3f could pass an invalid shift of 32..63 through).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 - (src2 << src3) via SUB with an LSL
// shifted-register operand (shift amounts 0..63).
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10445 
10446 
10447 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (src << lshift) >> rshift (arithmetic) collapses to one SBFM: the
// bitfield-move immr/imms encoding below reproduces the double shift.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // SBFM immr/imms: imms = 63 - lshift is the highest source bit kept,
    // immr = (rshift - lshift) mod 64 is the right-rotate amount.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit counterpart of sbfmL: (src << lshift) >> rshift via SBFMW.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // Word form: imms = 31 - lshift, immr = (rshift - lshift) mod 32.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned version: (src << lshift) >>> rshift collapses to one UBFM
// (zero-extending bitfield move) with the same immr/imms arithmetic.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // imms = 63 - lshift, immr = (rshift - lshift) mod 64 (see sbfmL).
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit counterpart of ubfmL: (src << lshift) >>> rshift via UBFMW.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // Word form: imms = 31 - lshift, immr = (rshift - lshift) mod 32.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, where mask is 2^width - 1 (guaranteed by
// immI_bitmask), becomes a single UBFXW extracting 'width' bits starting
// at bit 'rshift'. NOTE(review): the format string omits $rshift, so the
// disassembly view shows only dst/src/mask.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // mask == 2^width - 1, so the field width is log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant: (src >>> rshift) & mask via UBFX (immL_bitmask
// guarantees mask is 2^width - 1).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The ConvI2L is free here: ubfx zero-fills the upper bits, which matches
// sign extension of a known-non-negative int.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
10589 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) where lshift + rshift == 64 (the
// predicate checks the sum mod 64 is zero) is a 64-bit extract: EXTR
// takes the low 'rshift' bits of src2 below the bits of src1. When
// src1 == src2 this is a rotate right by rshift.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant: lshift + rshift must be a multiple of 32.
// NOTE(review): the format string says "extr" but the word form extrw is
// emitted — disassembly text only, no behavioral effect.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same as extrOrL but with the two halves combined by AddL: since the
// shifted values occupy disjoint bit ranges, add and or are equivalent.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of extrAddL (emits extrw; format text says "extr").
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
10651 
10652 
10653 // rol expander
10654 
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  // Expander only (no match rule): variable rotate-left of a long.
  // AArch64 has no ROL instruction, so negate the count and rotate
  // right instead: rol(x, s) == ror(x, -s) — RORV takes the count
  // modulo the register width, so the negation is safe for any shift.
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander

// 32-bit variant of rolL_rReg (see comment there): neg + RORVW.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Match a long rotate-left written as (src << shift) | (src >>> (64 - shift)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Match a long rotate-left written as (src << shift) | (src >>> (0 - shift));
// equivalent because shift counts are taken modulo 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
10702 
// Match an int rotate-left written as (src << shift) | (src >>> (32 - shift)).
// Must use 32-bit (int) register operands and expand to the 32-bit rotate
// expander rolI_rReg: the previous iRegL operands with rolL_rReg would have
// rotated the value as a 64-bit quantity, giving a wrong int result.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
10711 
// Match an int rotate-left written as (src << shift) | (src >>> (0 - shift));
// equivalent to the C_32 form because shift counts are taken modulo 32.
// Uses int registers and the 32-bit expander rolI_rReg (the previous iRegL
// operands with rolL_rReg would have rotated as a 64-bit quantity).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
10720 
10721 // ror expander
10722 
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  // Expander only (no match rule): variable rotate-right of a long,
  // mapped directly to a single RORV instruction.
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit variant of rorL_rReg: single RORVW.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Match a long rotate-right written as (src >>> shift) | (src << (64 - shift)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Match a long rotate-right written as (src >>> shift) | (src << (0 - shift));
// equivalent because shift counts are taken modulo 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
10768 
// Match an int rotate-right written as (src >>> shift) | (src << (32 - shift)).
// Must use 32-bit (int) register operands and expand to the 32-bit rotate
// expander rorI_rReg: the previous iRegL operands with rorL_rReg would have
// rotated the value as a 64-bit quantity, giving a wrong int result.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
10777 
// Match an int rotate-right written as (src >>> shift) | (src << (0 - shift));
// equivalent to the C_32 form because shift counts are taken modulo 32.
// Uses int registers and the 32-bit expander rorI_rReg (the previous iRegL
// operands with rorL_rReg would have rotated as a 64-bit quantity).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
10786 
10787 // Add/subtract (extended)
10788 
// Long add of a sign-extended int: fold the ConvI2L into the add using
// the extended-register form (sxtw), saving a separate extend.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Long subtract of a sign-extended int (see AddExtI).
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
10814 
10815 
// The ideal graph expresses a narrowing extension of src2 as a shift pair:
// (src2 << n) >> n sign-extends and (src2 << n) >>> n zero-extends the low
// (width - n) bits.  Fold the extension into the add via the extended-
// register forms (sxtb/sxth/sxtw, uxtb).

// int add of src2 sign-extended from 16 bits: (src2 << 16) >> 16 == sxth.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int add of src2 sign-extended from 8 bits: (src2 << 24) >> 24 == sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int add of src2 zero-extended from 8 bits: (src2 << 24) >>> 24 == uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of src2 sign-extended from 16 bits: (src2 << 48) >> 48 == sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of src2 sign-extended from 32 bits: (src2 << 32) >> 32 == sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of src2 sign-extended from 8 bits: (src2 << 56) >> 56 == sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long add of src2 zero-extended from 8 bits: (src2 << 56) >>> 56 == uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
10906 
10907 
// Zero extension expressed as a mask: src2 & 0xff / 0xffff / 0xffffffff
// zero-extends from 8/16/32 bits.  Fold the mask into the add using the
// extended-register forms (uxtb/uxth/uxtw); addw for int, add for long.

instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
10972 
// Subtract counterparts of the AddExt*_and rules above: fold a
// zero-extending mask on src2 into the subtract's extended-register form.

instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11037 
11038 // END This section of the file is automatically generated. Do not edit --------------
11039 
11040 // ============================================================================
11041 // Floating Point Arithmetic Instructions
11042 
// Scalar floating-point add/subtract/multiply, single (F) and double (D)
// precision; each maps to a single FP instruction on vector registers.

instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11132 
// We cannot use these fused multiply-add/subtract ops because they do not
// produce the same result as the equivalent separated ops (essentially,
// they do not round the intermediate result).  That's a shame.  They are
// left here, commented out, in case we can identify cases where it is
// legitimate to use them.
11138 
11139 
11140 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11141 //   match(Set dst (AddF (MulF src1 src2) src3));
11142 
11143 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
11144 
11145 //   ins_encode %{
11146 //     __ fmadds(as_FloatRegister($dst$$reg),
11147 //              as_FloatRegister($src1$$reg),
11148 //              as_FloatRegister($src2$$reg),
11149 //              as_FloatRegister($src3$$reg));
11150 //   %}
11151 
11152 //   ins_pipe(pipe_class_default);
11153 // %}
11154 
11155 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11156 //   match(Set dst (AddD (MulD src1 src2) src3));
11157 
11158 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
11159 
11160 //   ins_encode %{
11161 //     __ fmaddd(as_FloatRegister($dst$$reg),
11162 //              as_FloatRegister($src1$$reg),
11163 //              as_FloatRegister($src2$$reg),
11164 //              as_FloatRegister($src3$$reg));
11165 //   %}
11166 
11167 //   ins_pipe(pipe_class_default);
11168 // %}
11169 
11170 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11171 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
11172 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
11173 
11174 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
11175 
11176 //   ins_encode %{
11177 //     __ fmsubs(as_FloatRegister($dst$$reg),
11178 //               as_FloatRegister($src1$$reg),
11179 //               as_FloatRegister($src2$$reg),
11180 //              as_FloatRegister($src3$$reg));
11181 //   %}
11182 
11183 //   ins_pipe(pipe_class_default);
11184 // %}
11185 
11186 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11187 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
11188 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
11189 
11190 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
11191 
11192 //   ins_encode %{
11193 //     __ fmsubd(as_FloatRegister($dst$$reg),
11194 //               as_FloatRegister($src1$$reg),
11195 //               as_FloatRegister($src2$$reg),
11196 //               as_FloatRegister($src3$$reg));
11197 //   %}
11198 
11199 //   ins_pipe(pipe_class_default);
11200 // %}
11201 
11202 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11203 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
11204 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
11205 
11206 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
11207 
11208 //   ins_encode %{
11209 //     __ fnmadds(as_FloatRegister($dst$$reg),
11210 //                as_FloatRegister($src1$$reg),
11211 //                as_FloatRegister($src2$$reg),
11212 //                as_FloatRegister($src3$$reg));
11213 //   %}
11214 
11215 //   ins_pipe(pipe_class_default);
11216 // %}
11217 
11218 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11219 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
11220 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
11221 
11222 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
11223 
11224 //   ins_encode %{
11225 //     __ fnmaddd(as_FloatRegister($dst$$reg),
11226 //                as_FloatRegister($src1$$reg),
11227 //                as_FloatRegister($src2$$reg),
11228 //                as_FloatRegister($src3$$reg));
11229 //   %}
11230 
11231 //   ins_pipe(pipe_class_default);
11232 // %}
11233 
11234 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
11235 //   match(Set dst (SubF (MulF src1 src2) src3));
11236 
11237 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
11238 
11239 //   ins_encode %{
11240 //     __ fnmsubs(as_FloatRegister($dst$$reg),
11241 //                as_FloatRegister($src1$$reg),
11242 //                as_FloatRegister($src2$$reg),
11243 //                as_FloatRegister($src3$$reg));
11244 //   %}
11245 
11246 //   ins_pipe(pipe_class_default);
11247 // %}
11248 
11249 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
11250 //   match(Set dst (SubD (MulD src1 src2) src3));
11251 
11252 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
11253 
11254 //   ins_encode %{
11255 //   // n.b. insn name should be fnmsubd
11256 //     __ fnmsub(as_FloatRegister($dst$$reg),
11257 //                as_FloatRegister($src1$$reg),
11258 //                as_FloatRegister($src2$$reg),
11259 //                as_FloatRegister($src3$$reg));
11260 //   %}
11261 
11262 //   ins_pipe(pipe_class_default);
11263 // %}
11264 
11265 
// Scalar FP divide; costed much higher than add/mul to reflect the long
// latency of FDIV.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision divide; even more expensive than the single form.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11295 
// Single-precision FP negate (FNEGS).  Format string fixed to name the
// emitted instruction "fnegs", consistent with negD_reg_reg's "fnegd".
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11309 
// Double-precision FP negate (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// FP absolute value, single precision (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// FP absolute value, double precision (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision square root (FSQRTD); long latency, hence high cost.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float sqrt appears in the ideal graph as d2f(sqrtd(f2d(src))); a single
// correctly-rounded fsqrts produces the same result as that composition.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11375 
11376 // ============================================================================
11377 // Logical Instructions
11378 
11379 // Integer Logical Instructions
11380 
11381 // And Instructions
11382 
11383 
// Bitwise AND of two int registers (32-bit ANDW).
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11398 
// Bitwise AND of an int register with an immediate; immILog presumably
// restricts the constant to encodable logical immediates (defined with the
// file's operand declarations — confirm there).  Format string fixed from
// "andsw" to "andw": the encoding emits the non-flag-setting ANDW.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11413 
11414 // Or Instructions
11415 
// Bitwise OR of two int registers (32-bit ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// OR with an immediate logical constant.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Bitwise XOR of two int registers (32-bit EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// XOR with an immediate logical constant.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11477 
11478 // Long Logical Instructions
11479 // TODO
11480 
// 64-bit (long) logical ops, mirroring the int forms above.
// NOTE(review): the format strings below all say "# int" although these
// operate on longs — debug-output text only, but worth correcting.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// AND with an immediate logical constant.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// OR with an immediate logical constant.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// XOR with an immediate logical constant.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# int" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11574 
11575 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
11576 %{
11577   match(Set dst (ConvI2L src));
11578 
11579   ins_cost(INSN_COST);
11580   format %{ "sxtw  $dst, $src\t# i2l" %}
11581   ins_encode %{
11582     __ sbfm($dst$$Register, $src$$Register, 0, 31);
11583   %}
11584   ins_pipe(ialu_reg_shift);
11585 %}
11586 
11587 // this pattern occurs in bigmath arithmetic
11588 instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
11589 %{
11590   match(Set dst (AndL (ConvI2L src) mask));
11591 
11592   ins_cost(INSN_COST);
11593   format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
11594   ins_encode %{
11595     __ ubfm($dst$$Register, $src$$Register, 0, 31);
11596   %}
11597 
11598   ins_pipe(ialu_reg_shift);
11599 %}
11600 
// Narrow long to int.  A 32-bit register move copies the low word and
// zeroes the upper bits, which is all ConvL2I requires.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Int-to-boolean: dst = (src != 0) ? 1 : 0, via a 32-bit compare against
// zr followed by cset on NE.  Clobbers the flags (KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer-to-boolean: same pattern as convI2B but with a full 64-bit
// compare, since src is a pointer register.  Clobbers the flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
11649 
// Double to float (narrowing FP convert).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to double (widening FP convert).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to int: signed convert, round toward zero (fcvtzs, 32-bit dst).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to long: signed convert, round toward zero (64-bit dst).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Int to float: signed scalar convert from a 32-bit GP source.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Long to float: signed scalar convert from a 64-bit GP source.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double to int: signed convert, round toward zero (32-bit dst).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double to long: signed convert, round toward zero (64-bit dst).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Int to double: signed scalar convert from a 32-bit GP source.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Long to double: signed scalar convert from a 64-bit GP source.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11779 
// stack <-> reg and reg <-> reg shuffles with no conversion
//
// These MoveX2Y rules reinterpret the raw bit pattern of a value in a
// different register file (or stack slot); no numeric conversion happens.

// Load a float bit-pattern from a stack slot into a 32-bit GP register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load an int bit-pattern from a stack slot into a float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load a double bit-pattern from a stack slot into a 64-bit GP register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a long bit-pattern from a stack slot into a double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
11853 
// Store a float register's bit-pattern to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store a 32-bit GP register's bit-pattern to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
11889 
// Store a double register's bit-pattern to a long stack slot (no
// conversion, just a 64-bit FP store to [sp, #disp]).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Fixed: the pseudo-op text had the operands reversed ("strd $dst, $src");
  // the encoding stores $src to the stack slot $dst, and every sibling
  // *_reg_stack rule prints "str<t> $src, $dst".
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
11907 
// Store a 64-bit GP register's bit-pattern to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
11925 
// Direct register-to-register bit moves between the FP/SIMD and GP
// register files, using fmov (no memory round-trip, no conversion).
// NOTE(review): these use ins_pipe(pipe_class_memory) although fmov is a
// register transfer -- presumably modelling fp<->gp transfer latency;
// confirm against the pipeline class definitions.

// Float register bits -> 32-bit GP register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// 32-bit GP register bits -> float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}

// Double register bits -> 64-bit GP register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// 64-bit GP register bits -> double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
11997 
11998 // ============================================================================
11999 // clearing of an array
12000 
12001 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
12002 %{
12003   match(Set dummy (ClearArray cnt base));
12004   effect(USE_KILL cnt, USE_KILL base);
12005 
12006   ins_cost(4 * INSN_COST);
12007   format %{ "ClearArray $cnt, $base" %}
12008 
12009   ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));
12010 
12011   ins_pipe(pipe_class_memory);
12012 %}
12013 
12014 // ============================================================================
12015 // Overflow Math Instructions
12016 
12017 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
12018 %{
12019   match(Set cr (OverflowAddI op1 op2));
12020 
12021   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12022   ins_cost(INSN_COST);
12023   ins_encode %{
12024     __ cmnw($op1$$Register, $op2$$Register);
12025   %}
12026 
12027   ins_pipe(icmp_reg_reg);
12028 %}
12029 
12030 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
12031 %{
12032   match(Set cr (OverflowAddI op1 op2));
12033 
12034   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12035   ins_cost(INSN_COST);
12036   ins_encode %{
12037     __ cmnw($op1$$Register, $op2$$constant);
12038   %}
12039 
12040   ins_pipe(icmp_reg_imm);
12041 %}
12042 
12043 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
12044 %{
12045   match(Set cr (OverflowAddL op1 op2));
12046 
12047   format %{ "cmn   $op1, $op2\t# overflow check long" %}
12048   ins_cost(INSN_COST);
12049   ins_encode %{
12050     __ cmn($op1$$Register, $op2$$Register);
12051   %}
12052 
12053   ins_pipe(icmp_reg_reg);
12054 %}
12055 
12056 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
12057 %{
12058   match(Set cr (OverflowAddL op1 op2));
12059 
12060   format %{ "cmn   $op1, $op2\t# overflow check long" %}
12061   ins_cost(INSN_COST);
12062   ins_encode %{
12063     __ cmn($op1$$Register, $op2$$constant);
12064   %}
12065 
12066   ins_pipe(icmp_reg_imm);
12067 %}
12068 
12069 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
12070 %{
12071   match(Set cr (OverflowSubI op1 op2));
12072 
12073   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
12074   ins_cost(INSN_COST);
12075   ins_encode %{
12076     __ cmpw($op1$$Register, $op2$$Register);
12077   %}
12078 
12079   ins_pipe(icmp_reg_reg);
12080 %}
12081 
12082 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
12083 %{
12084   match(Set cr (OverflowSubI op1 op2));
12085 
12086   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
12087   ins_cost(INSN_COST);
12088   ins_encode %{
12089     __ cmpw($op1$$Register, $op2$$constant);
12090   %}
12091 
12092   ins_pipe(icmp_reg_imm);
12093 %}
12094 
12095 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
12096 %{
12097   match(Set cr (OverflowSubL op1 op2));
12098 
12099   format %{ "cmp   $op1, $op2\t# overflow check long" %}
12100   ins_cost(INSN_COST);
12101   ins_encode %{
12102     __ cmp($op1$$Register, $op2$$Register);
12103   %}
12104 
12105   ins_pipe(icmp_reg_reg);
12106 %}
12107 
12108 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
12109 %{
12110   match(Set cr (OverflowSubL op1 op2));
12111 
12112   format %{ "cmp   $op1, $op2\t# overflow check long" %}
12113   ins_cost(INSN_COST);
12114   ins_encode %{
12115     __ cmp($op1$$Register, $op2$$constant);
12116   %}
12117 
12118   ins_pipe(icmp_reg_imm);
12119 %}
12120 
12121 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
12122 %{
12123   match(Set cr (OverflowSubI zero op1));
12124 
12125   format %{ "cmpw  zr, $op1\t# overflow check int" %}
12126   ins_cost(INSN_COST);
12127   ins_encode %{
12128     __ cmpw(zr, $op1$$Register);
12129   %}
12130 
12131   ins_pipe(icmp_reg_imm);
12132 %}
12133 
12134 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
12135 %{
12136   match(Set cr (OverflowSubL zero op1));
12137 
12138   format %{ "cmp   zr, $op1\t# overflow check long" %}
12139   ins_cost(INSN_COST);
12140   ins_encode %{
12141     __ cmp(zr, $op1$$Register);
12142   %}
12143 
12144   ins_pipe(icmp_reg_imm);
12145 %}
12146 
// Int multiply overflow check.  smull produces the exact 64-bit product;
// the product overflows 32 bits iff it differs from the sign-extension of
// its own low word (NE after the subs).  Since there is no direct way to
// set V from that, the tail synthesizes it: materialize 0x80000000 on NE,
// then "cmpw rscratch1, 1" overflows (VS) exactly in that case.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: multiply-overflow check feeding a branch directly.  Restricted
// by the predicate to overflow/no_overflow tests, so the V-flag synthesis
// above is unnecessary -- branch on NE (overflow) or EQ (no overflow).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// Long multiply overflow check.  mul/smulh give the full 128-bit product;
// it fits in 64 bits iff the high half equals the sign-extension of the
// low half (compared via ASR #31 on the low word's top bits).  The same
// V-flag synthesis trick as overflowMulI_reg follows.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form: long multiply-overflow check feeding a branch directly
// (same predicate restriction and NE/EQ branch mapping as the int form).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
12236 
12237 // ============================================================================
12238 // Compare Instructions
12239 
12240 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
12241 %{
12242   match(Set cr (CmpI op1 op2));
12243 
12244   effect(DEF cr, USE op1, USE op2);
12245 
12246   ins_cost(INSN_COST);
12247   format %{ "cmpw  $op1, $op2" %}
12248 
12249   ins_encode(aarch64_enc_cmpw(op1, op2));
12250 
12251   ins_pipe(icmp_reg_reg);
12252 %}
12253 
12254 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
12255 %{
12256   match(Set cr (CmpI op1 zero));
12257 
12258   effect(DEF cr, USE op1);
12259 
12260   ins_cost(INSN_COST);
12261   format %{ "cmpw $op1, 0" %}
12262 
12263   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
12264 
12265   ins_pipe(icmp_reg_imm);
12266 %}
12267 
12268 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
12269 %{
12270   match(Set cr (CmpI op1 op2));
12271 
12272   effect(DEF cr, USE op1);
12273 
12274   ins_cost(INSN_COST);
12275   format %{ "cmpw  $op1, $op2" %}
12276 
12277   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
12278 
12279   ins_pipe(icmp_reg_imm);
12280 %}
12281 
12282 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
12283 %{
12284   match(Set cr (CmpI op1 op2));
12285 
12286   effect(DEF cr, USE op1);
12287 
12288   ins_cost(INSN_COST * 2);
12289   format %{ "cmpw  $op1, $op2" %}
12290 
12291   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
12292 
12293   ins_pipe(icmp_reg_imm);
12294 %}
12295 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

// Unsigned int compare, register-register (identical encoding to the
// signed form; only the flags-register class differs).
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (may need to
// materialize the constant, hence the higher cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12355 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
// NOTE(review): the format text prints "tst" but the encoder is the
// cmp-with-add/sub-immediate class -- the printed mnemonic may not match
// the emitted instruction; confirm against aarch64_enc_cmp_imm_addsub.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (higher cost: the
// constant may need to be materialized first).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12411 
// Pointer compare, register-register (unsigned flags: pointers compare
// unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop (narrow pointer) compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null-check compare: CmpP against the null constant.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null-check compare: CmpN against the narrow null constant.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
12467 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// Float compare, register-register.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12486 
// Float compare against constant 0.0 (immF0 operand), using the
// zero-immediate form of fcmps.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Fixed: was "0.0D" -- the 'D' suffix is a non-standard GNU extension
    // rejected by stricter compilers; a plain 0.0 literal is already a
    // double, so the same fcmps overload is selected.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
12500 // FROM HERE
12501 
// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12515 
// Double compare against constant 0.0 (immD0 operand), using the
// zero-immediate form of fcmpd.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Fixed: was "0.0D" -- the 'D' suffix is a non-standard GNU extension
    // rejected by stricter compilers; 0.0 is already a double literal.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
12529 
// Three-way float compare: dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if greater.  Clobbers the flags.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: the pseudo-op text was missing the closing parenthesis on the
  // csinvw line.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (removed a leftover "done" label that was bound but never branched to)
  %}

  ins_pipe(pipe_class_default);

%}
12557 
// Three-way double compare: dst = -1 if src1 < src2 or unordered,
// 0 if equal, +1 if greater.  Clobbers the flags.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: the pseudo-op text was missing the closing parenthesis on the
  // csinvw line.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (removed a leftover "done" label that was bound but never branched to)
  %}
  ins_pipe(pipe_class_default);

%}
12584 
// Three-way float compare against constant 0.0: dst = -1 if src1 < 0.0
// or unordered, 0 if equal, +1 if greater.  Clobbers the flags.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: the pseudo-op text was missing the closing parenthesis on the
  // csinvw line.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Fixed: was "0.0D" -- non-standard GNU literal suffix; 0.0 is a double.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (removed a leftover "done" label that was bound but never branched to)
  %}

  ins_pipe(pipe_class_default);

%}
12611 
// Three-way double compare against constant 0.0: dst = -1 if src1 < 0.0
// or unordered, 0 if equal, +1 if greater.  Clobbers the flags.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed: the pseudo-op text was missing the closing parenthesis on the
  // csinvw line.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Fixed: was "0.0D" -- non-standard GNU literal suffix; 0.0 is a double.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (removed a leftover "done" label that was bound but never branched to)
  %}
  ins_pipe(pipe_class_default);

%}
12637 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw produces 0/1 from the LT
// condition, then the negation turns 1 into all-ones.  Clobbers flags.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 smears the sign
// bit, yielding -1 for negative src and 0 otherwise in one instruction.
// (cr is killed per the effect clause even though no flags op is emitted.)
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
12674 
12675 // ============================================================================
12676 // Max and Min
12677 
12678 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
12679 %{
12680   match(Set dst (MinI src1 src2));
12681 
12682   effect(DEF dst, USE src1, USE src2, KILL cr);
12683   size(8);
12684 
12685   ins_cost(INSN_COST * 3);
12686   format %{
12687     "cmpw $src1 $src2\t signed int\n\t"
12688     "cselw $dst, $src1, $src2 lt\t"
12689   %}
12690 
12691   ins_encode %{
12692     __ cmpw(as_Register($src1$$reg),
12693             as_Register($src2$$reg));
12694     __ cselw(as_Register($dst$$reg),
12695              as_Register($src1$$reg),
12696              as_Register($src2$$reg),
12697              Assembler::LT);
12698   %}
12699 
12700   ins_pipe(ialu_reg_reg);
12701 %}
12702 // FROM HERE
12703 
12704 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
12705 %{
12706   match(Set dst (MaxI src1 src2));
12707 
12708   effect(DEF dst, USE src1, USE src2, KILL cr);
12709   size(8);
12710 
12711   ins_cost(INSN_COST * 3);
12712   format %{
12713     "cmpw $src1 $src2\t signed int\n\t"
12714     "cselw $dst, $src1, $src2 gt\t"
12715   %}
12716 
12717   ins_encode %{
12718     __ cmpw(as_Register($src1$$reg),
12719             as_Register($src2$$reg));
12720     __ cselw(as_Register($dst$$reg),
12721              as_Register($src1$$reg),
12722              as_Register($src2$$reg),
12723              Assembler::GT);
12724   %}
12725 
12726   ins_pipe(ialu_reg_reg);
12727 %}
12728 
12729 // ============================================================================
12730 // Branch Instructions
12731 
12732 // Direct Branch.
12733 instruct branch(label lbl)
12734 %{
12735   match(Goto);
12736 
12737   effect(USE lbl);
12738 
12739   ins_cost(BRANCH_COST);
12740   format %{ "b  $lbl" %}
12741 
12742   ins_encode(aarch64_enc_b(lbl));
12743 
12744   ins_pipe(pipe_branch);
12745 %}
12746 
12747 // Conditional Near Branch
12748 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
12749 %{
12750   // Same match rule as `branchConFar'.
12751   match(If cmp cr);
12752 
12753   effect(USE lbl);
12754 
12755   ins_cost(BRANCH_COST);
12756   // If set to 1 this indicates that the current instruction is a
12757   // short variant of a long branch. This avoids using this
12758   // instruction in first-pass matching. It will then only be used in
12759   // the `Shorten_branches' pass.
12760   // ins_short_branch(1);
12761   format %{ "b$cmp  $lbl" %}
12762 
12763   ins_encode(aarch64_enc_br_con(cmp, lbl));
12764 
12765   ins_pipe(pipe_branch_cond);
12766 %}
12767 
12768 // Conditional Near Branch Unsigned
12769 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
12770 %{
12771   // Same match rule as `branchConFar'.
12772   match(If cmp cr);
12773 
12774   effect(USE lbl);
12775 
12776   ins_cost(BRANCH_COST);
12777   // If set to 1 this indicates that the current instruction is a
12778   // short variant of a long branch. This avoids using this
12779   // instruction in first-pass matching. It will then only be used in
12780   // the `Shorten_branches' pass.
12781   // ins_short_branch(1);
12782   format %{ "b$cmp  $lbl\t# unsigned" %}
12783 
12784   ins_encode(aarch64_enc_br_conU(cmp, lbl));
12785 
12786   ins_pipe(pipe_branch_cond);
12787 %}
12788 
12789 // Make use of CBZ and CBNZ.  These instructions, as well as being
12790 // shorter than (cmp; branch), have the additional benefit of not
12791 // killing the flags.
12792 
12793 instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
12794   match(If cmp (CmpI op1 op2));
12795   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
12796             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
12797   effect(USE labl);
12798 
12799   ins_cost(BRANCH_COST);
12800   format %{ "cbw$cmp   $op1, $labl" %}
12801   ins_encode %{
12802     Label* L = $labl$$label;
12803     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
12804     if (cond == Assembler::EQ)
12805       __ cbzw($op1$$Register, *L);
12806     else
12807       __ cbnzw($op1$$Register, *L);
12808   %}
12809   ins_pipe(pipe_cmp_branch);
12810 %}
12811 
12812 instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
12813   match(If cmp (CmpL op1 op2));
12814   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
12815             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
12816   effect(USE labl);
12817 
12818   ins_cost(BRANCH_COST);
12819   format %{ "cb$cmp   $op1, $labl" %}
12820   ins_encode %{
12821     Label* L = $labl$$label;
12822     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
12823     if (cond == Assembler::EQ)
12824       __ cbz($op1$$Register, *L);
12825     else
12826       __ cbnz($op1$$Register, *L);
12827   %}
12828   ins_pipe(pipe_cmp_branch);
12829 %}
12830 
12831 instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
12832   match(If cmp (CmpP op1 op2));
12833   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
12834             || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
12835   effect(USE labl);
12836 
12837   ins_cost(BRANCH_COST);
12838   format %{ "cb$cmp   $op1, $labl" %}
12839   ins_encode %{
12840     Label* L = $labl$$label;
12841     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
12842     if (cond == Assembler::EQ)
12843       __ cbz($op1$$Register, *L);
12844     else
12845       __ cbnz($op1$$Register, *L);
12846   %}
12847   ins_pipe(pipe_cmp_branch);
12848 %}
12849 
12850 // Conditional Far Branch
12851 // Conditional Far Branch Unsigned
12852 // TODO: fixme
12853 
12854 // counted loop end branch near
12855 instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
12856 %{
12857   match(CountedLoopEnd cmp cr);
12858 
12859   effect(USE lbl);
12860 
12861   ins_cost(BRANCH_COST);
12862   // short variant.
12863   // ins_short_branch(1);
12864   format %{ "b$cmp $lbl \t// counted loop end" %}
12865 
12866   ins_encode(aarch64_enc_br_con(cmp, lbl));
12867 
12868   ins_pipe(pipe_branch);
12869 %}
12870 
12871 // counted loop end branch near Unsigned
12872 instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
12873 %{
12874   match(CountedLoopEnd cmp cr);
12875 
12876   effect(USE lbl);
12877 
12878   ins_cost(BRANCH_COST);
12879   // short variant.
12880   // ins_short_branch(1);
12881   format %{ "b$cmp $lbl \t// counted loop end unsigned" %}
12882 
12883   ins_encode(aarch64_enc_br_conU(cmp, lbl));
12884 
12885   ins_pipe(pipe_branch);
12886 %}
12887 
12888 // counted loop end branch far
12889 // counted loop end branch far unsigned
12890 // TODO: fixme
12891 
12892 // ============================================================================
12893 // inlined locking and unlocking
12894 
12895 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
12896 %{
12897   match(Set cr (FastLock object box));
12898   effect(TEMP tmp, TEMP tmp2);
12899 
12900   // TODO
12901   // identify correct cost
12902   ins_cost(5 * INSN_COST);
12903   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
12904 
12905   ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));
12906 
12907   ins_pipe(pipe_serial);
12908 %}
12909 
12910 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
12911 %{
12912   match(Set cr (FastUnlock object box));
12913   effect(TEMP tmp, TEMP tmp2);
12914 
12915   ins_cost(5 * INSN_COST);
12916   format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
12917 
12918   ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));
12919 
12920   ins_pipe(pipe_serial);
12921 %}
12922 
12923 
12924 // ============================================================================
12925 // Safepoint Instructions
12926 
12927 // TODO
12928 // provide a near and far version of this code
12929 
12930 instruct safePoint(iRegP poll)
12931 %{
12932   match(SafePoint poll);
12933 
12934   format %{
12935     "ldrw zr, [$poll]\t# Safepoint: poll for GC"
12936   %}
12937   ins_encode %{
12938     __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
12939   %}
12940   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
12941 %}
12942 
12943 
12944 // ============================================================================
12945 // Procedure Call/Return Instructions
12946 
12947 // Call Java Static Instruction
12948 
12949 instruct CallStaticJavaDirect(method meth)
12950 %{
12951   match(CallStaticJava);
12952 
12953   effect(USE meth);
12954 
12955   ins_cost(CALL_COST);
12956 
12957   format %{ "call,static $meth \t// ==> " %}
12958 
12959   ins_encode( aarch64_enc_java_static_call(meth),
12960               aarch64_enc_call_epilog );
12961 
12962   ins_pipe(pipe_class_call);
12963 %}
12964 
12965 // TO HERE
12966 
12967 // Call Java Dynamic Instruction
12968 instruct CallDynamicJavaDirect(method meth)
12969 %{
12970   match(CallDynamicJava);
12971 
12972   effect(USE meth);
12973 
12974   ins_cost(CALL_COST);
12975 
12976   format %{ "CALL,dynamic $meth \t// ==> " %}
12977 
12978   ins_encode( aarch64_enc_java_dynamic_call(meth),
12979                aarch64_enc_call_epilog );
12980 
12981   ins_pipe(pipe_class_call);
12982 %}
12983 
12984 // Call Runtime Instruction
12985 
12986 instruct CallRuntimeDirect(method meth)
12987 %{
12988   match(CallRuntime);
12989 
12990   effect(USE meth);
12991 
12992   ins_cost(CALL_COST);
12993 
12994   format %{ "CALL, runtime $meth" %}
12995 
12996   ins_encode( aarch64_enc_java_to_runtime(meth) );
12997 
12998   ins_pipe(pipe_class_call);
12999 %}
13000 
13001 // Call Runtime Instruction
13002 
13003 instruct CallLeafDirect(method meth)
13004 %{
13005   match(CallLeaf);
13006 
13007   effect(USE meth);
13008 
13009   ins_cost(CALL_COST);
13010 
13011   format %{ "CALL, runtime leaf $meth" %}
13012 
13013   ins_encode( aarch64_enc_java_to_runtime(meth) );
13014 
13015   ins_pipe(pipe_class_call);
13016 %}
13017 
13018 // Call Runtime Instruction
13019 
13020 instruct CallLeafNoFPDirect(method meth)
13021 %{
13022   match(CallLeafNoFP);
13023 
13024   effect(USE meth);
13025 
13026   ins_cost(CALL_COST);
13027 
13028   format %{ "CALL, runtime leaf nofp $meth" %}
13029 
13030   ins_encode( aarch64_enc_java_to_runtime(meth) );
13031 
13032   ins_pipe(pipe_class_call);
13033 %}
13034 
13035 // Tail Call; Jump from runtime stub to Java code.
13036 // Also known as an 'interprocedural jump'.
13037 // Target of jump will eventually return to caller.
13038 // TailJump below removes the return address.
13039 instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
13040 %{
13041   match(TailCall jump_target method_oop);
13042 
13043   ins_cost(CALL_COST);
13044 
13045   format %{ "br $jump_target\t# $method_oop holds method oop" %}
13046 
13047   ins_encode(aarch64_enc_tail_call(jump_target));
13048 
13049   ins_pipe(pipe_class_call);
13050 %}
13051 
13052 instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
13053 %{
13054   match(TailJump jump_target ex_oop);
13055 
13056   ins_cost(CALL_COST);
13057 
13058   format %{ "br $jump_target\t# $ex_oop holds exception oop" %}
13059 
13060   ins_encode(aarch64_enc_tail_jmp(jump_target));
13061 
13062   ins_pipe(pipe_class_call);
13063 %}
13064 
13065 // Create exception oop: created by stack-crawling runtime code.
13066 // Created exception is now available to this handler, and is setup
13067 // just prior to jumping to this handler. No code emitted.
13068 // TODO check
13069 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
13070 instruct CreateException(iRegP_R0 ex_oop)
13071 %{
13072   match(Set ex_oop (CreateEx));
13073 
13074   format %{ " -- \t// exception oop; no code emitted" %}
13075 
13076   size(0);
13077 
13078   ins_encode( /*empty*/ );
13079 
13080   ins_pipe(pipe_class_empty);
13081 %}
13082 
13083 // Rethrow exception: The exception oop will come in the first
13084 // argument position. Then JUMP (not call) to the rethrow stub code.
13085 instruct RethrowException() %{
13086   match(Rethrow);
13087   ins_cost(CALL_COST);
13088 
13089   format %{ "b rethrow_stub" %}
13090 
13091   ins_encode( aarch64_enc_rethrow() );
13092 
13093   ins_pipe(pipe_class_call);
13094 %}
13095 
13096 
13097 // Return Instruction
13098 // epilog node loads ret address into lr as part of frame pop
13099 instruct Ret()
13100 %{
13101   match(Return);
13102 
13103   format %{ "ret\t// return register" %}
13104 
13105   ins_encode( aarch64_enc_ret() );
13106 
13107   ins_pipe(pipe_branch);
13108 %}
13109 
13110 // Die now.
13111 instruct ShouldNotReachHere() %{
13112   match(Halt);
13113 
13114   ins_cost(CALL_COST);
13115   format %{ "ShouldNotReachHere" %}
13116 
13117   ins_encode %{
13118     // TODO
13119     // implement proper trap call here
13120     __ brk(999);
13121   %}
13122 
13123   ins_pipe(pipe_class_default);
13124 %}
13125 
13126 // ============================================================================
13127 // Partial Subtype Check
13128 //
13129 // superklass array for an instance of the superklass.  Set a hidden
13130 // internal cache on a hit (cache is checked with exposed code in
13131 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
13132 // encoding ALSO sets flags.
13133 
13134 instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
13135 %{
13136   match(Set result (PartialSubtypeCheck sub super));
13137   effect(KILL cr, KILL temp);
13138 
13139   ins_cost(1100);  // slightly larger than the next version
13140   format %{ "partialSubtypeCheck $result, $sub, $super" %}
13141 
13142   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
13143 
13144   opcode(0x1); // Force zero of result reg on hit
13145 
13146   ins_pipe(pipe_class_memory);
13147 %}
13148 
13149 instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
13150 %{
13151   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
13152   effect(KILL temp, KILL result);
13153 
13154   ins_cost(1100);  // slightly larger than the next version
13155   format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}
13156 
13157   ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
13158 
13159   opcode(0x0); // Don't zero result reg on hit
13160 
13161   ins_pipe(pipe_class_memory);
13162 %}
13163 
// String/array intrinsics.  Registers are fixed (R0..R4, R10) to match
// the MacroAssembler helper routines' expectations; inputs are
// USE_KILLed because the helpers consume them destructively.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.indexOf with a variable-length substring (icnt2 == -1 tells
// the helper the count is in a register).
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.indexOf specialized for a small (<= 4 chars) constant
// substring length; the constant is passed to the helper and the
// count register slot is zr.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// String.equals intrinsic (cnt characters).
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Arrays.equals intrinsic for char arrays.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
13264 
13265 // ============================================================================
13266 // This name is KNOWN by the ADLC and cannot be changed.
13267 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
13268 // for this guy.
13269 instruct tlsLoadP(thread_RegP dst)
13270 %{
13271   match(Set dst (ThreadLocal));
13272 
13273   ins_cost(0);
13274 
13275   format %{ " -- \t// $dst=Thread::current(), empty" %}
13276 
13277   size(0);
13278 
13279   ins_encode( /*empty*/ );
13280 
13281   ins_pipe(pipe_class_empty);
13282 %}
13283 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads/stores, selected on the ideal node's memory size
// (4/8/16 bytes); all use the vecX (128-bit) register class.

// Load vector (32 bits)
instruct loadV4(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load vector (64 bits)
instruct loadV8(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (32 bits)
instruct storeV4(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (64 bits)
instruct storeV8(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(pipe_class_memory);
%}
13351 
// Replicate a scalar across all lanes of a 128-bit vector register,
// from a GP/FP register (dup) or an immediate (movi).

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate2L(vecX dst, iRegL src)
%{
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// NOTE(review): named 2L but matches (ReplicateI zero) -- an all-zero
// 128-bit vector is identical for 4I and 2L lane layouts.  The format
// prints "movi" but the encoding actually zeroes the register with a
// self-EOR; confirm both are intended.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13465 
// ====================REDUCTION ARITHMETIC====================================

// Reduce-add of a 4x int vector plus a scalar: horizontal addv across
// lanes, move the lane sum to a GP register, then add the scalar.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Reduce-multiply of a 4x int vector times a scalar: fold the upper
// pair onto the lower pair with ins+mulv, then multiply the two
// remaining lanes and the scalar in GP registers.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Reduce-add of a 4x float vector plus a scalar.  Lanes are added
// strictly in order (scalar, then lane 0..3) to preserve the Java
// floating-point summation order.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Reduce-multiply of a 4x float vector times a scalar, lane by lane.
// NOTE(review): the trailing format text says "add reduction4f" --
// copy/paste from the add variant; printing only, no codegen effect.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Reduce-add of a 2x double vector plus a scalar.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Reduce-multiply of a 2x double vector times a scalar.
// NOTE(review): trailing format text says "add reduction2d" -- printing
// only, no codegen effect.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13614 
13615 // ====================VECTOR ARITHMETIC=======================================
13616 
13617 // --------------------------------- ADD --------------------------------------
13618 
13619 instruct vadd16B(vecX dst, vecX src1, vecX src2)
13620 %{
13621   match(Set dst (AddVB src1 src2));
13622   ins_cost(INSN_COST);
13623   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
13624   ins_encode %{
13625     __ addv(as_FloatRegister($dst$$reg), __ T16B,
13626             as_FloatRegister($src1$$reg),
13627             as_FloatRegister($src2$$reg));
13628   %}
13629   ins_pipe(pipe_class_default);
13630 %}
13631 
13632 instruct vadd8S(vecX dst, vecX src1, vecX src2)
13633 %{
13634   match(Set dst (AddVS src1 src2));
13635   ins_cost(INSN_COST);
13636   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
13637   ins_encode %{
13638     __ addv(as_FloatRegister($dst$$reg), __ T8H,
13639             as_FloatRegister($src1$$reg),
13640             as_FloatRegister($src2$$reg));
13641   %}
13642   ins_pipe(pipe_class_default);
13643 %}
13644 
13645 instruct vadd4I(vecX dst, vecX src1, vecX src2)
13646 %{
13647   match(Set dst (AddVI src1 src2));
13648   ins_cost(INSN_COST);
13649   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
13650   ins_encode %{
13651     __ addv(as_FloatRegister($dst$$reg), __ T4S,
13652             as_FloatRegister($src1$$reg),
13653             as_FloatRegister($src2$$reg));
13654   %}
13655   ins_pipe(pipe_class_default);
13656 %}
13657 
// Integer vector add, 2 x 64-bit lanes. The format text says "(2L)"
// (two Java longs) while the SIMD arrangement is T2D (2 x 64-bit doublewords);
// both describe the same lane layout.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13670 
// Floating-point vector add, 4 x single-precision lanes (arrangement T4S).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13683 
// Floating-point vector add, 2 x double-precision lanes (arrangement T2D).
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13696 
13697 // --------------------------------- SUB --------------------------------------
13698 
// Integer vector subtract, 16 x 8-bit lanes (arrangement T16B).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13711 
// Integer vector subtract, 8 x 16-bit lanes (arrangement T8H).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13724 
// Integer vector subtract, 4 x 32-bit lanes (arrangement T4S).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13737 
// Integer vector subtract, 2 x 64-bit lanes ("2L" in the format text,
// arrangement T2D in the encoding — same lane layout).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13750 
// Floating-point vector subtract, 4 x single-precision lanes (T4S).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13763 
// Floating-point vector subtract, 2 x double-precision lanes (T2D).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13776 
13777 // --------------------------------- MUL --------------------------------------
13778 
// Integer vector multiply, 8 x 16-bit lanes (T8H). Note: no byte (16B)
// or long (2L) multiply rules exist in this section; only S/I lanes are
// matched for MulV here.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13791 
// Integer vector multiply, 4 x 32-bit lanes (T4S).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13804 
// Floating-point vector multiply, 4 x single-precision lanes (T4S).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13817 
// Floating-point vector multiply, 2 x double-precision lanes (T2D).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13830 
13831 // --------------------------------- DIV --------------------------------------
13832 
// Floating-point vector divide, 4 x single-precision lanes (T4S).
// Division exists only for FP lanes; there is no integer SIMD divide
// on AArch64, hence no DivVI/DivVL rules.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13845 
// Floating-point vector divide, 2 x double-precision lanes (T2D).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13858 
13859 // --------------------------------- AND --------------------------------------
13860 
// Bitwise AND of two 128-bit vectors. Logical ops are lane-size agnostic,
// so a single T16B rule covers every AndV element type. The assembler
// method is named andr (not "and") to avoid the C++ keyword.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13873 
13874 // --------------------------------- OR ---------------------------------------
13875 
// Bitwise OR of two 128-bit vectors (lane-size agnostic, one T16B rule).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13888 
13889 // --------------------------------- XOR --------------------------------------
13890 
// Bitwise XOR of two 128-bit vectors (lane-size agnostic, one T16B rule);
// emitted as the AArch64 EOR instruction.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13903 
13904 // ------------------------------ Shift ---------------------------------------
13905 
// Materialize a left-shift count: broadcast the scalar count from a GP
// register into every byte lane of the vector (DUP).
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13914 
// Right shifts on aarch64 SIMD are implemented as a left shift by a negative amount
// Materialize a right-shift count: broadcast the scalar count, then negate
// every lane — SSHL/USHL with a negative per-lane count performs the right
// shift (see section comment above).
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13925 
// Variable shift of 16 x 8-bit lanes. One SSHL rule matches both left and
// arithmetic right shifts: the shift-count vector was negated by
// vshiftcntR for the right-shift case, and SSHL shifts right when the
// per-lane count is negative.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13938 
// Variable logical (unsigned) right shift of 16 x 8-bit lanes via USHL
// with the negated shift-count vector produced by vshiftcntR.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13950 
// Left shift of 16 x 8-bit lanes by a constant. The constant is masked to
// 0..31 (Java int shift-count semantics); a count >= the 8-bit lane width
// would shift everything out, so the result is produced as zero instead.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Count exceeds lane width: zero dst via x ^ x (SHL cannot encode 8).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
13968 
// Arithmetic right shift of 16 x 8-bit lanes by a constant. Counts >= 8
// are clamped to 7 (maximum sign-propagating shift for a byte lane).
// NOTE(review): the amount is then transformed as -sh & 7 before the sshr
// call — the same pre-negation convention used by every *_imm right shift
// in this file; confirm against this assembler's sshr immediate encoding.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
13982 
// Logical (unsigned) right shift of 16 x 8-bit lanes by a constant.
// Counts >= 8 shift everything out, so the result is produced as zero.
// NOTE(review): the ushr amount is passed as -sh & 7, matching the
// pre-negation convention of the other *_imm right shifts in this file.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Count exceeds lane width: zero dst via x ^ x.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14000 
// Variable shift of 8 x 16-bit lanes. One SSHL rule covers left and
// arithmetic right shifts (the right-shift count vector is negated).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14013 
// Variable logical right shift of 8 x 16-bit lanes via USHL with the
// negated shift-count vector.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14025 
// Left shift of 8 x 16-bit lanes by a constant. Constant masked to 0..31
// (Java int shift semantics); a count >= the 16-bit lane width yields zero.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Count exceeds lane width: zero dst via x ^ x.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14043 
// Arithmetic right shift of 8 x 16-bit lanes by a constant. Counts >= 16
// are clamped to 15 (max sign-propagating shift for a halfword lane);
// the amount is then pre-negated (-sh & 15), this file's convention for
// sshr immediates.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
14057 
// Logical right shift of 8 x 16-bit lanes by a constant. Counts >= 16
// shift everything out, so the result is produced as zero; otherwise the
// amount is pre-negated (-sh & 15) per this file's ushr convention.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Count exceeds lane width: zero dst via x ^ x.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14075 
// Variable shift of 4 x 32-bit lanes. One SSHL rule covers left and
// arithmetic right shifts (the right-shift count vector is negated).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14088 
// Variable logical right shift of 4 x 32-bit lanes via USHL with the
// negated shift-count vector.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14100 
// Left shift of 4 x 32-bit lanes by a constant. Masking with 31 is exact
// for a 32-bit lane (matches Java int shift semantics), so no lane-width
// overflow branch is needed — unlike the 16B/8H immediate rules.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14112 
// Arithmetic right shift of 4 x 32-bit lanes by a constant; the amount is
// pre-negated and masked to the lane width (-c & 31), this file's
// convention for sshr immediates.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14124 
// Logical right shift of 4 x 32-bit lanes by a constant; amount
// pre-negated and masked to the lane width (-c & 31).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14136 
// Variable shift of 2 x 64-bit lanes. One SSHL rule covers left and
// arithmetic right shifts (the right-shift count vector is negated).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14149 
// Variable logical right shift of 2 x 64-bit lanes via USHL with the
// negated shift-count vector.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14161 
// Left shift of 2 x 64-bit lanes by a constant. Masking with 63 is exact
// for a 64-bit lane (matches Java long shift semantics), so no overflow
// branch is needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
14173 
// Arithmetic right shift of 2 x 64-bit lanes by a constant; amount
// pre-negated and masked to the lane width (-c & 63), per this file's
// sshr immediate convention.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
14185 
// Logical right shift of 2 x 64-bit lanes by a constant; amount
// pre-negated and masked to the lane width (-c & 63).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
14197 
14198 //----------PEEPHOLE RULES-----------------------------------------------------
14199 // These must follow all instruction definitions as they use the names
14200 // defined in the instructions definitions.
14201 //
14202 // peepmatch ( root_instr_name [preceding_instruction]* );
14203 //
14204 // peepconstraint %{
14205 // (instruction_number.operand_name relational_op instruction_number.operand_name
14206 //  [, ...] );
14207 // // instruction numbers are zero-based using left to right order in peepmatch
14208 //
14209 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
14210 // // provide an instruction_number.operand_name for each operand that appears
14211 // // in the replacement instruction's match rule
14212 //
14213 // ---------VM FLAGS---------------------------------------------------------
14214 //
14215 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14216 //
14217 // Each peephole rule is given an identifying number starting with zero and
14218 // increasing by one in the order seen by the parser.  An individual peephole
14219 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14220 // on the command-line.
14221 //
14222 // ---------CURRENT LIMITATIONS----------------------------------------------
14223 //
14224 // Only match adjacent instructions in same basic block
14225 // Only equality constraints
14226 // Only constraints between operands, not (0.dest_reg == RAX_enc)
14227 // Only one replacement instruction
14228 //
14229 // ---------EXAMPLE----------------------------------------------------------
14230 //
14231 // // pertinent parts of existing instructions in architecture description
14232 // instruct movI(iRegINoSp dst, iRegI src)
14233 // %{
14234 //   match(Set dst (CopyI src));
14235 // %}
14236 //
14237 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
14238 // %{
14239 //   match(Set dst (AddI dst src));
14240 //   effect(KILL cr);
14241 // %}
14242 //
14243 // // Change (inc mov) to lea
14244 // peephole %{
//   // increment preceded by register-register move
14246 //   peepmatch ( incI_iReg movI );
14247 //   // require that the destination register of the increment
14248 //   // match the destination register of the move
14249 //   peepconstraint ( 0.dst == 1.dst );
14250 //   // construct a replacement instruction that sets
14251 //   // the destination to ( move's source register + one )
14252 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
14253 // %}
14254 //
14255 
14256 // Implementation no longer uses movX instructions since
14257 // machine-independent system no longer uses CopyX nodes.
14258 //
14259 // peephole
14260 // %{
14261 //   peepmatch (incI_iReg movI);
14262 //   peepconstraint (0.dst == 1.dst);
14263 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
14264 // %}
14265 
14266 // peephole
14267 // %{
14268 //   peepmatch (decI_iReg movI);
14269 //   peepconstraint (0.dst == 1.dst);
14270 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
14271 // %}
14272 
14273 // peephole
14274 // %{
14275 //   peepmatch (addI_iReg_imm movI);
14276 //   peepconstraint (0.dst == 1.dst);
14277 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
14278 // %}
14279 
14280 // peephole
14281 // %{
14282 //   peepmatch (incL_iReg movL);
14283 //   peepconstraint (0.dst == 1.dst);
14284 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
14285 // %}
14286 
14287 // peephole
14288 // %{
14289 //   peepmatch (decL_iReg movL);
14290 //   peepconstraint (0.dst == 1.dst);
14291 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
14292 // %}
14293 
14294 // peephole
14295 // %{
14296 //   peepmatch (addL_iReg_imm movL);
14297 //   peepconstraint (0.dst == 1.dst);
14298 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
14299 // %}
14300 
14301 // peephole
14302 // %{
14303 //   peepmatch (addP_iReg_imm movP);
14304 //   peepconstraint (0.dst == 1.dst);
14305 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
14306 // %}
14307 
14308 // // Change load of spilled value to only a spill
14309 // instruct storeI(memory mem, iRegI src)
14310 // %{
14311 //   match(Set mem (StoreI mem src));
14312 // %}
14313 //
14314 // instruct loadI(iRegINoSp dst, memory mem)
14315 // %{
14316 //   match(Set dst (LoadI mem));
14317 // %}
14318 //
14319 
14320 //----------SMARTSPILL RULES---------------------------------------------------
14321 // These must follow all instruction definitions as they use the names
14322 // defined in the instructions definitions.
14323 
14324 // Local Variables:
14325 // mode: c++
14326 // End: