1 //
   2 // Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
  31 // architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
  71 //   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each is a 128-bit SIMD
 156 // register that can hold a vector of up to 4 * 32 bit floats or
 157 // 2 * 64 bit doubles.  We currently only use the first float or
 158 // double element of the vector.
 159 
 160 // For Java use, float registers v0-v15 are always save-on-call
 161 // (whereas the platform ABI treats v8-v15 as callee save). Float
 162 // registers v16-v31 are SOC as per the platform spec.
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
 328 // The AArch64 CPSR status flag register is not directly accessible as
 329 // an instruction operand. The FPSR status flag register is a system
 330 // register which can be written/read using MSR/MRS but again does not
 331 // appear as an operand (a code identifying the FPSR occurs as an
 332 // immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
 432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 434 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 435 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
     // As above, but with R29 (fp) available for allocation; paired with
     // the no_fp class via reg_class_dynamic below.
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26,                        // comma required: R29 below is live in this class
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580     R29,                        // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
     // Dynamically selects between the two classes above based on
     // PreserveFramePointer (fp is excluded when it must be preserved).
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649     R29, R29_H,                 // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
 868 // Class for all 64bit vector registers
 869 reg_class vectord_reg(
 870     V0, V0_H,
 871     V1, V1_H,
 872     V2, V2_H,
 873     V3, V3_H,
 874     V4, V4_H,
 875     V5, V5_H,
 876     V6, V6_H,
 877     V7, V7_H,
 878     V8, V8_H,
 879     V9, V9_H,
 880     V10, V10_H,
 881     V11, V11_H,
 882     V12, V12_H,
 883     V13, V13_H,
 884     V14, V14_H,
 885     V15, V15_H,
 886     V16, V16_H,
 887     V17, V17_H,
 888     V18, V18_H,
 889     V19, V19_H,
 890     V20, V20_H,
 891     V21, V21_H,
 892     V22, V22_H,
 893     V23, V23_H,
 894     V24, V24_H,
 895     V25, V25_H,
 896     V26, V26_H,
 897     V27, V27_H,
 898     V28, V28_H,
 899     V29, V29_H,
 900     V30, V30_H,
 901     V31, V31_H
 902 );
 903 
 904 // Class for all 128bit vector registers
 905 reg_class vectorx_reg(
 906     V0, V0_H, V0_J, V0_K,
 907     V1, V1_H, V1_J, V1_K,
 908     V2, V2_H, V2_J, V2_K,
 909     V3, V3_H, V3_J, V3_K,
 910     V4, V4_H, V4_J, V4_K,
 911     V5, V5_H, V5_J, V5_K,
 912     V6, V6_H, V6_J, V6_K,
 913     V7, V7_H, V7_J, V7_K,
 914     V8, V8_H, V8_J, V8_K,
 915     V9, V9_H, V9_J, V9_K,
 916     V10, V10_H, V10_J, V10_K,
 917     V11, V11_H, V11_J, V11_K,
 918     V12, V12_H, V12_J, V12_K,
 919     V13, V13_H, V13_J, V13_K,
 920     V14, V14_H, V14_J, V14_K,
 921     V15, V15_H, V15_J, V15_K,
 922     V16, V16_H, V16_J, V16_K,
 923     V17, V17_H, V17_J, V17_K,
 924     V18, V18_H, V18_J, V18_K,
 925     V19, V19_H, V19_J, V19_K,
 926     V20, V20_H, V20_J, V20_K,
 927     V21, V21_H, V21_J, V21_K,
 928     V22, V22_H, V22_J, V22_K,
 929     V23, V23_H, V23_J, V23_K,
 930     V24, V24_H, V24_J, V24_K,
 931     V25, V25_H, V25_J, V25_K,
 932     V26, V26_H, V26_J, V26_K,
 933     V27, V27_H, V27_J, V27_K,
 934     V28, V28_H, V28_J, V28_K,
 935     V29, V29_H, V29_J, V29_K,
 936     V30, V30_H, V30_J, V30_K,
 937     V31, V31_H, V31_J, V31_K
 938 );
 939 
 940 // Class for 128 bit register v0
 941 reg_class v0_reg(
 942     V0, V0_H
 943 );
 944 
 945 // Class for 128 bit register v1
 946 reg_class v1_reg(
 947     V1, V1_H
 948 );
 949 
 950 // Class for 128 bit register v2
 951 reg_class v2_reg(
 952     V2, V2_H
 953 );
 954 
 955 // Class for 128 bit register v3
 956 reg_class v3_reg(
 957     V3, V3_H
 958 );
 959 
 960 // Singleton class for condition codes
reg_class int_flags(RFLAGS); // sole member: the condition-code register
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked twice as expensive as a plain insn.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references carry barriers, so rank them well above both.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
// Sizing hooks queried by shared C2 code; AArch64 emits no call
// trampoline stubs, so both report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
// Exception/deopt handler stubs: emit_* are defined in the generated
// ad file; the size_* helpers let shared code reserve stub space.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // the exception handler is just a far branch
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): 4 words must cover the adr plus whatever
    // MacroAssembler::far_branch_size() requires -- confirm they agree
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers (defined in the source %{ %} block below)
  MemBarNode *has_parent_membar(const Node *n,
                                ProjNode *&ctl, ProjNode *&mem);
  MemBarNode *has_child_membar(const MemBarNode *n,
                               ProjNode *&ctl, ProjNode *&mem);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs
  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // Use barrier instructions for unsafe volatile gets rather than
  // trying to identify an exact signature for them.
  // false => the predicates below also try to match the inlined
  // unsafe volatile get signature and plant ldar<x> for it.
  const bool UseBarriersForUnsafeVolatileGet = false;
1053 %}
1054 
1055 source %{
1056 
1057   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1058   // use to implement volatile reads and writes. For a volatile read
1059   // we simply need
1060   //
1061   //   ldar<x>
1062   //
1063   // and for a volatile write we need
1064   //
1065   //   stlr<x>
1066   // 
1067   // Alternatively, we can implement them by pairing a normal
1068   // load/store with a memory barrier. For a volatile read we need
1069   // 
1070   //   ldr<x>
1071   //   dmb ishld
1072   //
1073   // for a volatile write
1074   //
1075   //   dmb ish
1076   //   str<x>
1077   //   dmb ish
1078   //
1079   // In order to generate the desired instruction sequence we need to
1080   // be able to identify specific 'signature' ideal graph node
1081   // sequences which i) occur as a translation of a volatile reads or
1082   // writes and ii) do not occur through any other translation or
  // graph transformation. We can then provide alternative adlc
1084   // matching rules which translate these node sequences to the
1085   // desired machine code sequences. Selection of the alternative
1086   // rules can be implemented by predicates which identify the
1087   // relevant node sequences.
1088   //
1089   // The ideal graph generator translates a volatile read to the node
1090   // sequence
1091   //
1092   //   LoadX[mo_acquire]
1093   //   MemBarAcquire
1094   //
1095   // As a special case when using the compressed oops optimization we
1096   // may also see this variant
1097   //
1098   //   LoadN[mo_acquire]
1099   //   DecodeN
1100   //   MemBarAcquire
1101   //
1102   // A volatile write is translated to the node sequence
1103   //
1104   //   MemBarRelease
1105   //   StoreX[mo_release]
1106   //   MemBarVolatile
1107   //
1108   // n.b. the above node patterns are generated with a strict
1109   // 'signature' configuration of input and output dependencies (see
1110   // the predicates below for exact details). The two signatures are
1111   // unique to translated volatile reads/stores -- they will not
1112   // appear as a result of any other bytecode translation or inlining
1113   // nor as a consequence of optimizing transforms.
1114   //
1115   // We also want to catch inlined unsafe volatile gets and puts and
1116   // be able to implement them using either ldar<x>/stlr<x> or some
  // combination of ldr<x>/str<x> and dmb instructions.
1118   //
1119   // Inlined unsafe volatiles puts manifest as a minor variant of the
1120   // normal volatile put node sequence containing an extra cpuorder
1121   // membar
1122   //
1123   //   MemBarRelease
1124   //   MemBarCPUOrder
1125   //   StoreX[mo_release]
1126   //   MemBarVolatile
1127   //
1128   // n.b. as an aside, the cpuorder membar is not itself subject to
1129   // matching and translation by adlc rules.  However, the rule
1130   // predicates need to detect its presence in order to correctly
1131   // select the desired adlc rules.
1132   //
1133   // Inlined unsafe volatiles gets manifest as a somewhat different
1134   // node sequence to a normal volatile get
1135   //
1136   //   MemBarCPUOrder
1137   //        ||       \\
1138   //   MemBarAcquire LoadX[mo_acquire]
1139   //        ||
1140   //   MemBarCPUOrder
1141   //
1142   // In this case the acquire membar does not directly depend on the
1143   // load. However, we can be sure that the load is generated from an
1144   // inlined unsafe volatile get if we see it dependent on this unique
1145   // sequence of membar nodes. Similarly, given an acquire membar we
1146   // can know that it was added because of an inlined unsafe volatile
1147   // get if it is fed and feeds a cpuorder membar and if its feed
1148   // membar also feeds an acquiring load.
1149   //
1150   // So, where we can identify these volatile read and write
1151   // signatures we can choose to plant either of the above two code
1152   // sequences. For a volatile read we can simply plant a normal
1153   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1154   // also choose to inhibit translation of the MemBarAcquire and
1155   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1156   //
1157   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1159   // normal str<x> and then a dmb ish for the MemBarVolatile.
1160   // Alternatively, we can inhibit translation of the MemBarRelease
1161   // and MemBarVolatile and instead plant a simple stlr<x>
1162   // instruction.
1163   //
1164   // Of course, the above only applies when we see these signature
1165   // configurations. We still want to plant dmb instructions in any
1166   // other cases where we may see a MemBarAcquire, MemBarRelease or
1167   // MemBarVolatile. For example, at the end of a constructor which
1168   // writes final/volatile fields we will see a MemBarRelease
1169   // instruction and this needs a 'dmb ish' lest we risk the
1170   // constructed object being visible without making the
1171   // final/volatile field writes visible.
1172   //
1173   // n.b. the translation rules below which rely on detection of the
1174   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1175   // If we see anything other than the signature configurations we
  // always just translate the loads and stores to ldr<x> and str<x>
1177   // and translate acquire, release and volatile membars to the
1178   // relevant dmb instructions.
1179   //
1180   // n.b.b as a case in point for the above comment, the current
1181   // predicates don't detect the precise signature for certain types
1182   // of volatile object stores (where the heap_base input type is not
1183   // known at compile-time to be non-NULL). In those cases the
1184   // MemBarRelease and MemBarVolatile bracket an if-then-else sequence
1185   // with a store in each branch (we need a different store depending
1186   // on whether heap_base is actually NULL). In such a case we will
1187   // just plant a dmb both before and after the branch/merge. The
1188   // predicate could (and probably should) be fixed later to also
1189   // detect this case.
1190 
1191   // graph traversal helpers
1192 
1193   // if node n is linked to a parent MemBarNode by an intervening
1194   // Control or Memory ProjNode return the MemBarNode otherwise return
1195   // NULL.
1196   //
1197   // n may only be a Load or a MemBar.
1198   //
1199   // The ProjNode* references c and m are used to return the relevant
1200   // nodes.
1201 
  // Walk up from n (a Load or a MemBar) through its Control and Memory
  // inputs; if both are ProjNodes hanging off the same MemBarNode,
  // return that membar and pass the two projections back through c and
  // m. Returns NULL (leaving c/m possibly partially written) otherwise.
  MemBarNode *has_parent_membar(const Node *n, ProjNode *&c, ProjNode *&m)
  {
    Node *ctl = NULL;
    Node *mem = NULL;
    Node *membar = NULL;

    // pick up the ctl/mem inputs using the index layout appropriate
    // to the node's type
    if (n->is_Load()) {
      ctl = n->lookup(LoadNode::Control);
      mem = n->lookup(LoadNode::Memory);
    } else if (n->is_MemBar()) {
      ctl = n->lookup(TypeFunc::Control);
      mem = n->lookup(TypeFunc::Memory);
    } else {
        return NULL;
    }

    // both feeds must exist and must be projections
    if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj())
      return NULL;

    c = ctl->as_Proj();

    // input 0 of the control projection is its defining node
    membar = ctl->lookup(0);

    if (!membar || !membar->is_MemBar())
      return NULL;

    m = mem->as_Proj();

    // the memory projection must come off the same membar
    if (mem->lookup(0) != membar)
      return NULL;

    return membar->as_MemBar();
  }
1235 
1236   // if n is linked to a child MemBarNode by intervening Control and
1237   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1238   //
  // The ProjNode*& arguments c and m are used to return the relevant
  // nodes.
1242 
  // Walk down from membar n through its Control and Memory projections;
  // if some single MemBarNode consumes both, return it, passing the two
  // projections back through c and m. Returns NULL otherwise (c and m
  // may still have been written).
  MemBarNode *has_child_membar(const MemBarNode *n, ProjNode *&c, ProjNode *&m)
  {
    ProjNode *ctl = n->proj_out(TypeFunc::Control);
    ProjNode *mem = n->proj_out(TypeFunc::Memory);

    // MemBar needs to have both a Ctl and Mem projection
    if (! ctl || ! mem)
      return NULL;

    c = ctl;
    m = mem;

    MemBarNode *child = NULL;
    Node *x;

    // find a membar among the users of the control projection
    for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
      x = ctl->fast_out(i);
      // if we see a membar we keep hold of it. we may also see a new
      // arena copy of the original but it will appear later
      if (x->is_MemBar()) {
          child = x->as_MemBar();
          break;
      }
    }

    if (child == NULL)
      return NULL;

    // the same membar must also consume the memory projection
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see a membar we keep hold of it. we may also see a new
      // arena copy of the original but it will appear later
      if (x == child) {
        return child;
      }
    }
    return NULL;
  }
1281 
1282   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1283 
// Returns true iff this acquire membar belongs to one of the volatile
// read signatures described above, in which case the matcher plants an
// ldar<x> for the load and can elide the membar's dmb.
bool unnecessary_acquire(const Node *barrier) {
  // assert barrier->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr())
      x = x->in(1);

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // only continue if we want to try to match unsafe volatile gets
  if (UseBarriersForUnsafeVolatileGet)
    return false;

  // need to check for
  //
  //     MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = has_parent_membar(barrier, ctl, mem);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (! ld || ! ld->is_acquire())
    return false;
  // the same load must also hang off the Mem projection; drop ld
  // again when we find it there
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    // if we see the same load we drop it and stop searching
    if (x == ld) {
      ld = NULL;
      break;
    }
  }
  // we must have dropped the load
  if (ld)
    return false;
  // check for a child cpuorder membar
  MemBarNode *child  = has_child_membar(barrier->as_MemBar(), ctl, mem);
  if (!child || child->Opcode() != Op_MemBarCPUOrder)
    return false;

  return true;
}
1378 
1379 bool needs_acquiring_load(const Node *n)
1380 {
1381   // assert n->is_Load();
1382   if (UseBarriersForVolatile)
1383     // we use a normal load and a dmb
1384     return false;
1385 
1386   LoadNode *ld = n->as_Load();
1387 
1388   if (!ld->is_acquire())
1389     return false;
1390 
1391   // check if this load is feeding an acquire membar
1392   //
1393   //   LoadX[mo_acquire]
1394   //   {  |1   }
1395   //   {DecodeN}
1396   //      |Parms
1397   //   MemBarAcquire*
1398   //
1399   // where * tags node we were passed
1400   // and |k means input k
1401 
1402   Node *start = ld;
1403   Node *mbacq = NULL;
1404 
1405   // if we hit a DecodeNarrowPtr we reset the start node and restart
1406   // the search through the outputs
1407  restart:
1408 
1409   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
1410     Node *x = start->fast_out(i);
1411     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
1412       mbacq = x;
1413     } else if (!mbacq &&
1414                (x->is_DecodeNarrowPtr() ||
1415                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
1416       start = x;
1417       goto restart;
1418     }
1419   }
1420 
1421   if (mbacq) {
1422     return true;
1423   }
1424 
1425   // only continue if we want to try to match unsafe volatile gets
1426   if (UseBarriersForUnsafeVolatileGet)
1427     return false;
1428 
1429   // check if Ctl and Proj feed comes from a MemBarCPUOrder
1430   //
1431   //     MemBarCPUOrder
1432   //        ||       \\
1433   //   MemBarAcquire* LoadX[mo_acquire]
1434   //        ||
1435   //   MemBarCPUOrder
1436 
1437   MemBarNode *membar;
1438   ProjNode *ctl;
1439   ProjNode *mem;
1440 
1441   membar = has_parent_membar(ld, ctl, mem);
1442 
1443   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
1444     return false;
1445 
1446   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
1447 
1448   membar = has_child_membar(membar, ctl, mem);
1449 
1450   if (!membar || !membar->Opcode() == Op_MemBarAcquire)
1451     return false;
1452 
1453   membar = has_child_membar(membar, ctl, mem);
1454   
1455   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder)
1456     return false;
1457 
1458   return true;
1459 }
1460 
// Returns true iff this release membar is the leading barrier of a
// volatile store signature, in which case the matcher elides its dmb
// (the store is planted as stlr<x> -- see needs_releasing_store).
bool unnecessary_release(const Node *n) {
  // assert n->is_MemBar();
  if (UseBarriersForVolatile)
    // we need to plant a dmb
    return false;

  // ok, so we can omit this release barrier if it has been inserted
  // as part of a volatile store sequence
  //
  //   MemBarRelease
  //  {      ||      }
  //  {MemBarCPUOrder} -- optional
  //         ||     \\
  //         ||     StoreX[mo_release]
  //         | \     /
  //         | MergeMem
  //         | /
  //   MemBarVolatile
  //
  // where
  //  || and \\ represent Ctl and Mem feeds via Proj nodes
  //  | \ and / indicate further routing of the Ctl and Mem feeds
  //
  // so we need to check that
  //
  // ia) the release membar (or its dependent cpuorder membar) feeds
  // control to a store node (via a Control project node)
  //
  // ii) the store is ordered release
  //
  // iii) the release membar (or its dependent cpuorder membar) feeds
  // control to a volatile membar (via the same Control project node)
  //
  // iv) the release membar feeds memory to a merge mem and to the
  // same store (both via a single Memory proj node)
  //
  // v) the store outputs to the merge mem
  //
  // vi) the merge mem outputs to the same volatile membar
  //
  // n.b. if this is an inlined unsafe node then the release membar
  // may feed its control and memory links via an intervening cpuorder
  // membar. this case can be dealt with when we check the release
  // membar projections. if they both feed a single cpuorder membar
  // node continue to make the same checks as above but with the
  // cpuorder membar substituted for the release membar. if they don't
  // both feed a cpuorder membar then the check fails.
  //
  // n.b.b. for an inlined unsafe store of an object in the case where
  // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
  // an embedded if then else where we expect the store. this is
  // needed to do the right type of store depending on whether
  // heap_base is NULL. We could check for that but for now we can
  // just take the hit of inserting a redundant dmb for this
  // redundant volatile membar

  MemBarNode *barrier = n->as_MemBar();
  ProjNode *ctl;
  ProjNode *mem;
  // check for an intervening cpuorder membar
  MemBarNode *b = has_child_membar(barrier, ctl, mem);
  if (b && b->Opcode() == Op_MemBarCPUOrder) {
    // ok, so start from the dependent cpuorder barrier
    barrier = b;
  }
  // check the ctl and mem flow
  ctl = barrier->proj_out(TypeFunc::Control);
  mem = barrier->proj_out(TypeFunc::Memory);

  // the barrier needs to have both a Ctl and Mem projection
  if (! ctl || ! mem)
    return false;

  Node *x = NULL;
  Node *mbvol = NULL;
  StoreNode * st = NULL;

  // For a normal volatile write the Ctl ProjNode should have output
  // to a MemBarVolatile and a Store marked as releasing
  //
  // n.b. for an inlined unsafe store of an object in the case where
  // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
  // an embedded if then else where we expect the store. this is
  // needed to do the right type of store depending on whether
  // heap_base is NULL. We could check for that case too but for now
  // we can just take the hit of inserting a dmb and a non-volatile
  // store to implement the volatile store

  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
      // exactly one volatile membar is allowed on the ctl projection
      if (mbvol) {
        return false;
      }
      mbvol = x;
    } else if (x->is_Store()) {
      st = x->as_Store();
      if (! st->is_release()) {
        return false;
      }
    } else if (!x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  // both the volatile membar and the releasing store must be present
  if (!mbvol || !st)
    return false;

  // the Mem ProjNode should output to a MergeMem and the same Store
  Node *mm = NULL;
  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
    x = mem->fast_out(i);
    if (!mm && x->is_MergeMem()) {
      mm = x;
    } else if (x != st && !x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  if (!mm)
    return false;

  // the MergeMem should output to the MemBarVolatile
  for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
    x = mm->fast_out(i);
    if (x != mbvol && !x->is_Mach()) {
      // we may see mach nodes added during matching but nothing else
      return false;
    }
  }

  return true;
}
1596 
1597 bool unnecessary_volatile(const Node *n) {
1598   // assert n->is_MemBar();
1599   if (UseBarriersForVolatile)
1600     // we need to plant a dmb
1601     return false;
1602 
1603   // ok, so we can omit this volatile barrier if it has been inserted
1604   // as part of a volatile store sequence
1605   //
1606   //   MemBarRelease
1607   //  {      ||      }
1608   //  {MemBarCPUOrder} -- optional
1609   //         ||     \\
1610   //         ||     StoreX[mo_release]
1611   //         | \     /
1612   //         | MergeMem
1613   //         | /
1614   //   MemBarVolatile
1615   //
1616   // where
1617   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1618   //  | \ and / indicate further routing of the Ctl and Mem feeds
1619   // 
1620   // we need to check that
1621   //
1622   // i) the volatile membar gets its control feed from a release
1623   // membar (or its dependent cpuorder membar) via a Control project
1624   // node
1625   //
1626   // ii) the release membar (or its dependent cpuorder membar) also
1627   // feeds control to a store node via the same proj node
1628   //
1629   // iii) the store is ordered release
1630   //
1631   // iv) the release membar (or its dependent cpuorder membar) feeds
1632   // memory to a merge mem and to the same store (both via a single
1633   // Memory proj node)
1634   //
1635   // v) the store outputs to the merge mem
1636   //
1637   // vi) the merge mem outputs to the volatile membar
1638   //
1639   // n.b. for an inlined unsafe store of an object in the case where
1640   // !TypePtr::NULL_PTR->higher_equal(type(heap_base_oop)) we may see
1641   // an embedded if then else where we expect the store. this is
1642   // needed to do the right type of store depending on whether
1643   // heap_base is NULL. We could check for that but for now we can
1644   // just take the hit of on inserting a redundant dmb for this
1645   // redundant volatile membar
1646 
1647   MemBarNode *mbvol = n->as_MemBar();
1648   Node *x = n->lookup(TypeFunc::Control);
1649 
1650   if (! x || !x->is_Proj())
1651     return false;
1652 
1653   ProjNode *proj = x->as_Proj();
1654 
1655   x = proj->lookup(0);
1656 
1657   if (!x || !x->is_MemBar())
1658     return false;
1659 
1660   MemBarNode *barrier = x->as_MemBar();
1661 
1662   // if the barrier is a release membar we have what we want. if it is
1663   // a cpuorder membar then we need to ensure that it is fed by a
1664   // release membar in which case we proceed to check the graph below
1665   // this cpuorder membar as the feed
1666 
1667   if (x->Opcode() != Op_MemBarRelease) {
1668     if (x->Opcode() != Op_MemBarCPUOrder)
1669       return false;
1670     ProjNode *ctl;
1671     ProjNode *mem;
1672     MemBarNode *b = has_parent_membar(x, ctl, mem);
1673     if (!b || !b->Opcode() == Op_MemBarRelease)
1674       return false;
1675   }
1676 
1677   ProjNode *ctl = barrier->proj_out(TypeFunc::Control);
1678   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1679 
1680   // barrier needs to have both a Ctl and Mem projection
1681   // and we need to have reached it via the Ctl projection
1682   if (! ctl || ! mem || ctl != proj)
1683     return false;
1684 
1685   StoreNode * st = NULL;
1686 
1687   // The Ctl ProjNode should have output to a MemBarVolatile and
1688   // a Store marked as releasing
1689   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1690     x = ctl->fast_out(i);
1691     if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1692       if (x != mbvol) {
1693         return false;
1694       }
1695     } else if (x->is_Store()) {
1696       st = x->as_Store();
1697       if (! st->is_release()) {
1698         return false;
1699       }
1700     } else if (!x->is_Mach()){
1701       // we may see mach nodes added during matching but nothing else
1702       return false;
1703     }
1704   }
1705 
1706   if (!st)
1707     return false;
1708 
1709   // the Mem ProjNode should output to a MergeMem and the same Store
1710   Node *mm = NULL;
1711   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1712     x = mem->fast_out(i);
1713     if (!mm && x->is_MergeMem()) {
1714       mm = x;
1715     } else if (x != st && !x->is_Mach()) {
1716       // we may see mach nodes added during matching but nothing else
1717       return false;
1718     }
1719   }
1720 
1721   if (!mm)
1722     return false;
1723 
1724   // the MergeMem should output to the MemBarVolatile
1725   for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1726     x = mm->fast_out(i);
1727     if (x != mbvol && !x->is_Mach()) {
1728       // we may see mach nodes added during matching but nothing else
1729       return false;
1730     }
1731   }
1732 
1733   return true;
1734 }
1735 
1736 
1737 
1738 bool needs_releasing_store(const Node *n)
1739 {
1740   // assert n->is_Store();
1741   if (UseBarriersForVolatile)
1742     // we use a normal store and dmb combination
1743     return false;
1744 
1745   StoreNode *st = n->as_Store();
1746 
1747   if (!st->is_release())
1748     return false;
1749 
1750   // check if this store is bracketed by a release (or its dependent
1751   // cpuorder membar) and a volatile membar
1752   //
1753   //   MemBarRelease
1754   //  {      ||      }
1755   //  {MemBarCPUOrder} -- optional
1756   //         ||     \\
1757   //         ||     StoreX[mo_release]
1758   //         | \     /
1759   //         | MergeMem
1760   //         | /
1761   //   MemBarVolatile
1762   //
1763   // where
1764   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1765   //  | \ and / indicate further routing of the Ctl and Mem feeds
1766   // 
1767 
1768 
1769   Node *x = st->lookup(TypeFunc::Control);
1770 
1771   if (! x || !x->is_Proj())
1772     return false;
1773 
1774   ProjNode *proj = x->as_Proj();
1775 
1776   x = proj->lookup(0);
1777 
1778   if (!x || !x->is_MemBar())
1779     return false;
1780 
1781   MemBarNode *barrier = x->as_MemBar();
1782 
1783   // if the barrier is a release membar we have what we want. if it is
1784   // a cpuorder membar then we need to ensure that it is fed by a
1785   // release membar in which case we proceed to check the graph below
1786   // this cpuorder membar as the feed
1787 
1788   if (x->Opcode() != Op_MemBarRelease) {
1789     if (x->Opcode() != Op_MemBarCPUOrder)
1790       return false;
1791     Node *ctl = x->lookup(TypeFunc::Control);
1792     Node *mem = x->lookup(TypeFunc::Memory);
1793     if (!ctl || !ctl->is_Proj() || !mem || !mem->is_Proj())
1794       return false;
1795     x = ctl->lookup(0);
1796     if (!x || !x->is_MemBar() || !x->Opcode() == Op_MemBarRelease)
1797       return false;
1798     Node *y = mem->lookup(0);
1799     if (!y || y != x)
1800       return false;
1801   }
1802 
1803   ProjNode *ctl = barrier->proj_out(TypeFunc::Control);
1804   ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1805 
1806   // MemBarRelease needs to have both a Ctl and Mem projection
1807   // and we need to have reached it via the Ctl projection
1808   if (! ctl || ! mem || ctl != proj)
1809     return false;
1810 
1811   MemBarNode *mbvol = NULL;
1812 
1813   // The Ctl ProjNode should have output to a MemBarVolatile and
1814   // a Store marked as releasing
1815   for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1816     x = ctl->fast_out(i);
1817     if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
1818       mbvol = x->as_MemBar();
1819     } else if (x->is_Store()) {
1820       if (x != st) {
1821         return false;
1822       }
1823     } else if (!x->is_Mach()){
1824       return false;
1825     }
1826   }
1827 
1828   if (!mbvol)
1829     return false;
1830 
1831   // the Mem ProjNode should output to a MergeMem and the same Store
1832   Node *mm = NULL;
1833   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1834     x = mem->fast_out(i);
1835     if (!mm && x->is_MergeMem()) {
1836       mm = x;
1837     } else if (x != st && !x->is_Mach()) {
1838       return false;
1839     }
1840   }
1841 
1842   if (!mm)
1843     return false;
1844 
1845   // the MergeMem should output to the MemBarVolatile
1846   for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
1847     x = mm->fast_out(i);
1848     if (x != mbvol && !x->is_Mach()) {
1849       return false;
1850     }
1851   }
1852 
1853   return true;
1854 }
1855 
1856 
1857 
1858 #define __ _masm.
1859 
1860 // advance declarations for helper functions to convert register
1861 // indices to register objects
1862 
1863 // the ad file has to provide implementations of certain methods
1864 // expected by the generic code
1865 //
1866 // REQUIRED FUNCTIONALITY
1867 
1868 //=============================================================================
1869 
1870 // !!!!! Special hack to get all types of calls to specify the byte offset
1871 //       from the start of the call to the point where the return address
1872 //       will point.
1873 
1874 int MachCallStaticJavaNode::ret_addr_offset()
1875 {
1876   // call should be a simple bl
1877   int off = 4;
1878   return off;
1879 }
1880 
1881 int MachCallDynamicJavaNode::ret_addr_offset()
1882 {
1883   return 16; // movz, movk, movk, bl
1884 }
1885 
1886 int MachCallRuntimeNode::ret_addr_offset() {
1887   // for generated stubs the call will be
1888   //   far_call(addr)
1889   // for real runtime callouts it will be six instructions
1890   // see aarch64_enc_java_to_runtime
1891   //   adr(rscratch2, retaddr)
1892   //   lea(rscratch1, RuntimeAddress(addr)
1893   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
1894   //   blrt rscratch1
1895   CodeBlob *cb = CodeCache::find_blob(_entry_point);
1896   if (cb) {
1897     return MacroAssembler::far_branch_size();
1898   } else {
1899     return 6 * NativeInstruction::instruction_size;
1900   }
1901 }
1902 
1903 // Indicate if the safepoint node needs the polling page as an input
1904 
1905 // the shared code plants the oop data at the start of the generated
1906 // code for the safepoint node and that needs ot be at the load
1907 // instruction itself. so we cannot plant a mov of the safepoint poll
1908 // address followed by a load. setting this to true means the mov is
1909 // scheduled as a prior instruction. that's better for scheduling
1910 // anyway.
1911 
1912 bool SafePointNode::needs_polling_address_input()
1913 {
1914   return true;
1915 }
1916 
1917 //=============================================================================
1918 
1919 #ifndef PRODUCT
1920 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1921   st->print("BREAKPOINT");
1922 }
1923 #endif
1924 
1925 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1926   MacroAssembler _masm(&cbuf);
1927   __ brk(0);
1928 }
1929 
1930 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
1931   return MachNode::size(ra_);
1932 }
1933 
1934 //=============================================================================
1935 
1936 #ifndef PRODUCT
1937   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
1938     st->print("nop \t# %d bytes pad for loops and calls", _count);
1939   }
1940 #endif
1941 
1942   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
1943     MacroAssembler _masm(&cbuf);
1944     for (int i = 0; i < _count; i++) {
1945       __ nop();
1946     }
1947   }
1948 
1949   uint MachNopNode::size(PhaseRegAlloc*) const {
1950     return _count * NativeInstruction::instruction_size;
1951   }
1952 
1953 //=============================================================================
1954 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1955 
1956 int Compile::ConstantTable::calculate_table_base_offset() const {
1957   return 0;  // absolute addressing, no offset
1958 }
1959 
1960 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
1961 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
1962   ShouldNotReachHere();
1963 }
1964 
1965 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
1966   // Empty encoding
1967 }
1968 
1969 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
1970   return 0;
1971 }
1972 
1973 #ifndef PRODUCT
1974 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1975   st->print("-- \t// MachConstantBaseNode (empty encoding)");
1976 }
1977 #endif
1978 
#ifndef PRODUCT
// Debug-print the prologue. The two branches below presumably mirror
// the small-frame/large-frame encodings chosen by
// MacroAssembler::build_frame in emit() — confirm against build_frame
// if the printed form ever disagrees with the generated code.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frame: the adjustment fits an immediate sub
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: materialize the adjustment in rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
2000 
// Emit the method prologue: a patchable entry nop, an optional stack
// bang, the frame build, and simulator/compile bookkeeping.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack when the shared code decides the frame is large
  // enough to require it
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  // inform the simulator that we are entering a compiled method
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record where the frame becomes valid for stack walking
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2036 
2037 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
2038 {
2039   return MachNode::size(ra_); // too many variables; just compute it
2040                               // the hard way
2041 }
2042 
2043 int MachPrologNode::reloc() const
2044 {
2045   return 0;
2046 }
2047 
2048 //=============================================================================
2049 
#ifndef PRODUCT
// Debug-print the epilogue: pop the frame (three size-dependent
// encodings) and, for method returns, touch the safepoint polling
// page. Presumably mirrors MacroAssembler::remove_frame — see emit().
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // nothing beyond the saved lr/rfp pair
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: the offset fits an immediate
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: materialize the adjustment in rscratch1
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
2075 
// Emit the method epilogue: tear down the frame and, for method
// returns, read the safepoint polling page.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  // tear down the frame (see MacroAssembler::remove_frame)
  __ remove_frame(framesize);

  // inform the simulator that we are re-entering the caller
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  // poll-on-return so the VM can stop this thread at a safepoint
  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
2091 
2092 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
2093   // Variable size. Determine dynamically.
2094   return MachNode::size(ra_);
2095 }
2096 
2097 int MachEpilogNode::reloc() const {
2098   // Return number of relocatable values contained in this instruction.
2099   return 1; // 1 for polling page.
2100 }
2101 
2102 const Pipeline * MachEpilogNode::pipeline() const {
2103   return MachNode::pipeline_class();
2104 }
2105 
2106 // This method seems to be obsolete. It is declared in machnode.hpp
2107 // and defined in all *.ad files, but it is never called. Should we
2108 // get rid of it?
2109 int MachEpilogNode::safepoint_offset() const {
2110   assert(do_polling(), "no return for this epilog node");
2111   return 4;
2112 }
2113 
2114 //=============================================================================
2115 
2116 // Figure out which register class each belongs in: rc_int, rc_float or
2117 // rc_stack.
2118 enum RC { rc_bad, rc_int, rc_float, rc_stack };
2119 
2120 static enum RC rc_class(OptoReg::Name reg) {
2121 
2122   if (reg == OptoReg::Bad) {
2123     return rc_bad;
2124   }
2125 
2126   // we have 30 int registers * 2 halves
2127   // (rscratch1 and rscratch2 are omitted)
2128 
2129   if (reg < 60) {
2130     return rc_int;
2131   }
2132 
2133   // we have 32 float register * 2 halves
2134   if (reg < 60 + 128) {
2135     return rc_float;
2136   }
2137 
2138   // Between float regs & stack is the flags regs.
2139   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
2140 
2141   return rc_stack;
2142 }
2143 
2144 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
2145   Compile* C = ra_->C;
2146 
2147   // Get registers to move.
2148   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
2149   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
2150   OptoReg::Name dst_hi = ra_->get_reg_second(this);
2151   OptoReg::Name dst_lo = ra_->get_reg_first(this);
2152 
2153   enum RC src_hi_rc = rc_class(src_hi);
2154   enum RC src_lo_rc = rc_class(src_lo);
2155   enum RC dst_hi_rc = rc_class(dst_hi);
2156   enum RC dst_lo_rc = rc_class(dst_lo);
2157 
2158   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
2159 
2160   if (src_hi != OptoReg::Bad) {
2161     assert((src_lo&1)==0 && src_lo+1==src_hi &&
2162            (dst_lo&1)==0 && dst_lo+1==dst_hi,
2163            "expected aligned-adjacent pairs");
2164   }
2165 
2166   if (src_lo == dst_lo && src_hi == dst_hi) {
2167     return 0;            // Self copy, no move.
2168   }
2169 
2170   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
2171               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
2172   int src_offset = ra_->reg2offset(src_lo);
2173   int dst_offset = ra_->reg2offset(dst_lo);
2174 
2175   if (bottom_type()->isa_vect() != NULL) {
2176     uint ireg = ideal_reg();
2177     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
2178     if (cbuf) {
2179       MacroAssembler _masm(cbuf);
2180       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
2181       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
2182         // stack->stack
2183         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
2184         if (ireg == Op_VecD) {
2185           __ unspill(rscratch1, true, src_offset);
2186           __ spill(rscratch1, true, dst_offset);
2187         } else {
2188           __ spill_copy128(src_offset, dst_offset);
2189         }
2190       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
2191         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2192                ireg == Op_VecD ? __ T8B : __ T16B,
2193                as_FloatRegister(Matcher::_regEncode[src_lo]));
2194       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
2195         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
2196                        ireg == Op_VecD ? __ D : __ Q,
2197                        ra_->reg2offset(dst_lo));
2198       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
2199         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2200                        ireg == Op_VecD ? __ D : __ Q,
2201                        ra_->reg2offset(src_lo));
2202       } else {
2203         ShouldNotReachHere();
2204       }
2205     }
2206   } else if (cbuf) {
2207     MacroAssembler _masm(cbuf);
2208     switch (src_lo_rc) {
2209     case rc_int:
2210       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
2211         if (is64) {
2212             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
2213                    as_Register(Matcher::_regEncode[src_lo]));
2214         } else {
2215             MacroAssembler _masm(cbuf);
2216             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
2217                     as_Register(Matcher::_regEncode[src_lo]));
2218         }
2219       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
2220         if (is64) {
2221             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2222                      as_Register(Matcher::_regEncode[src_lo]));
2223         } else {
2224             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2225                      as_Register(Matcher::_regEncode[src_lo]));
2226         }
2227       } else {                    // gpr --> stack spill
2228         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2229         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
2230       }
2231       break;
2232     case rc_float:
2233       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
2234         if (is64) {
2235             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
2236                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2237         } else {
2238             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
2239                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2240         }
2241       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
2242           if (cbuf) {
2243             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2244                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2245         } else {
2246             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2247                      as_FloatRegister(Matcher::_regEncode[src_lo]));
2248         }
2249       } else {                    // fpr --> stack spill
2250         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2251         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
2252                  is64 ? __ D : __ S, dst_offset);
2253       }
2254       break;
2255     case rc_stack:
2256       if (dst_lo_rc == rc_int) {  // stack --> gpr load
2257         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
2258       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
2259         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
2260                    is64 ? __ D : __ S, src_offset);
2261       } else {                    // stack --> stack copy
2262         assert(dst_lo_rc == rc_stack, "spill to bad register class");
2263         __ unspill(rscratch1, is64, src_offset);
2264         __ spill(rscratch1, is64, dst_offset);
2265       }
2266       break;
2267     default:
2268       assert(false, "bad rc_class for spill");
2269       ShouldNotReachHere();
2270     }
2271   }
2272 
2273   if (st) {
2274     st->print("spill ");
2275     if (src_lo_rc == rc_stack) {
2276       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
2277     } else {
2278       st->print("%s -> ", Matcher::regName[src_lo]);
2279     }
2280     if (dst_lo_rc == rc_stack) {
2281       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
2282     } else {
2283       st->print("%s", Matcher::regName[dst_lo]);
2284     }
2285     if (bottom_type()->isa_vect() != NULL) {
2286       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
2287     } else {
2288       st->print("\t# spill size = %d", is64 ? 64:32);
2289     }
2290   }
2291 
2292   return 0;
2293 
2294 }
2295 
2296 #ifndef PRODUCT
2297 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2298   if (!ra_)
2299     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
2300   else
2301     implementation(NULL, ra_, false, st);
2302 }
2303 #endif
2304 
2305 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2306   implementation(&cbuf, ra_, false, NULL);
2307 }
2308 
2309 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
2310   return MachNode::size(ra_);
2311 }
2312 
2313 //=============================================================================
2314 
2315 #ifndef PRODUCT
2316 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2317   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2318   int reg = ra_->get_reg_first(this);
2319   st->print("add %s, rsp, #%d]\t# box lock",
2320             Matcher::regName[reg], offset);
2321 }
2322 #endif
2323 
2324 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2325   MacroAssembler _masm(&cbuf);
2326 
2327   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
2328   int reg    = ra_->get_encode(this);
2329 
2330   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
2331     __ add(as_Register(reg), sp, offset);
2332   } else {
2333     ShouldNotReachHere();
2334   }
2335 }
2336 
2337 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
2338   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
2339   return 4;
2340 }
2341 
2342 //=============================================================================
2343 
2344 #ifndef PRODUCT
2345 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
2346 {
2347   st->print_cr("# MachUEPNode");
2348   if (UseCompressedClassPointers) {
2349     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2350     if (Universe::narrow_klass_shift() != 0) {
2351       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
2352     }
2353   } else {
2354    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
2355   }
2356   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
2357   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
2358 }
2359 #endif
2360 
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // compare the receiver's klass (loaded from j_rarg0) against the
  // inline cache's expected klass — see MacroAssembler::cmp_klass for
  // the roles of rscratch1/rscratch2
  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  // mismatch: go resolve/patch the inline cache
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Variable size; measured from the emitted code.
  return MachNode::size(ra_);
}
2379 
2380 // REQUIRED EMIT CODE
2381 
2382 //=============================================================================
2383 
// Emit exception handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the stub could not be allocated (CodeBuffer::expand failed).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  // jump (possibly via a far branch) to the shared exception blob
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2401 
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the stub could not be allocated (CodeBuffer::expand failed).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  // set lr to this handler's address before jumping to the unpack
  // blob — NOTE(review): presumably the deopt blob uses lr to identify
  // the deopt site; confirm against SharedRuntime::deopt_blob()
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
2420 
2421 // REQUIRED MATCHER CODE
2422 
2423 //=============================================================================
2424 
2425 const bool Matcher::match_rule_supported(int opcode) {
2426 
2427   // TODO
2428   // identify extra cases that we might want to provide match rules for
2429   // e.g. Op_StrEquals and other intrinsics
2430   if (!has_match_rule(opcode)) {
2431     return false;
2432   }
2433 
2434   return true;  // Per default match rules are supported.
2435 }
2436 
2437 int Matcher::regnum_to_fpu_offset(int regnum)
2438 {
2439   Unimplemented();
2440   return 0;
2441 }
2442 
2443 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
2444 {
2445   Unimplemented();
2446   return false;
2447 }
2448 
2449 const bool Matcher::isSimpleConstant64(jlong value) {
2450   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
2451   // Probably always true, even if a temp register is required.
2452   return true;
2453 }
2454 
2455 // true just means we have fast l2f conversion
2456 const bool Matcher::convL2FSupported(void) {
2457   return true;
2458 }
2459 
2460 // Vector width in bytes.
2461 const int Matcher::vector_width_in_bytes(BasicType bt) {
2462   int size = MIN2(16,(int)MaxVectorSize);
2463   // Minimum 2 values in vector
2464   if (size < 2*type2aelembytes(bt)) size = 0;
2465   // But never < 4
2466   if (size < 4) size = 0;
2467   return size;
2468 }
2469 
2470 // Limits on vector size (number of elements) loaded into vector.
2471 const int Matcher::max_vector_size(const BasicType bt) {
2472   return vector_width_in_bytes(bt)/type2aelembytes(bt);
2473 }
2474 const int Matcher::min_vector_size(const BasicType bt) {
2475 //  For the moment limit the vector size to 8 bytes
2476     int size = 8 / type2aelembytes(bt);
2477     if (size < 2) size = 2;
2478     return size;
2479 }
2480 
2481 // Vector ideal reg.
2482 const int Matcher::vector_ideal_reg(int len) {
2483   switch(len) {
2484     case  8: return Op_VecD;
2485     case 16: return Op_VecX;
2486   }
2487   ShouldNotReachHere();
2488   return 0;
2489 }
2490 
2491 const int Matcher::vector_shift_count_ideal_reg(int size) {
2492   return Op_VecX;
2493 }
2494 
2495 // AES support not yet implemented
2496 const bool Matcher::pass_original_key_for_aes() {
2497   return false;
2498 }
2499 
// Misaligned vector loads/stores are accepted unless -XX:+AlignVector
// forces alignment. (The old comment referred to x86; this is the
// aarch64 port.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2504 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray: below this, emit inline stores.
const int Matcher::init_array_short_size = 18 * BytesPerLong;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// (false here presumably relies on the hardware ignoring the upper
// count bits — confirm against the A64 shift instruction semantics)
const bool Matcher::need_masked_shift_count = false;
2533 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // only allow complex addressing when no shift is needed to decode
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2553 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// Not used on aarch64: the body is Unimplemented and must never be
// reached. (The old "No-op on amd64" comment was copied from another
// port.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2585 
2586 // Return whether or not this register is ever used as an argument.
2587 // This function is used on startup to build the trampoline stubs in
2588 // generateOptoStub.  Registers not mentioned will be killed by the VM
2589 // call in the trampoline, and arguments in those registers not be
2590 // available to the callee.
2591 bool Matcher::can_be_java_arg(int reg)
2592 {
2593   return
2594     reg ==  R0_num || reg == R0_H_num ||
2595     reg ==  R1_num || reg == R1_H_num ||
2596     reg ==  R2_num || reg == R2_H_num ||
2597     reg ==  R3_num || reg == R3_H_num ||
2598     reg ==  R4_num || reg == R4_H_num ||
2599     reg ==  R5_num || reg == R5_H_num ||
2600     reg ==  R6_num || reg == R6_H_num ||
2601     reg ==  R7_num || reg == R7_H_num ||
2602     reg ==  V0_num || reg == V0_H_num ||
2603     reg ==  V1_num || reg == V1_H_num ||
2604     reg ==  V2_num || reg == V2_H_num ||
2605     reg ==  V3_num || reg == V3_H_num ||
2606     reg ==  V4_num || reg == V4_H_num ||
2607     reg ==  V5_num || reg == V5_H_num ||
2608     reg ==  V6_num || reg == V6_H_num ||
2609     reg ==  V7_num || reg == V7_H_num;
2610 }
2611 
2612 bool Matcher::is_spillable_arg(int reg)
2613 {
2614   return can_be_java_arg(reg);
2615 }
2616 
2617 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
2618   return false;
2619 }
2620 
// Register for DIVI projection of divmodI.
// NOTE(review): all four projection masks below are ShouldNotReachHere
// — presumably divmodI/divmodL nodes are never generated on aarch64;
// confirm before relying on these.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Registers used to preserve SP across a method handle invoke: the
// frame pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2647 
2648 // helper for encoding java_to_runtime calls on sim
2649 //
2650 // this is needed to compute the extra arguments required when
2651 // planting a call to the simulator blrt instruction. the TypeFunc
2652 // can be queried to identify the counts for integral, and floating
2653 // arguments and the return type
2654 
// Count the integral (gpcnt) and floating-point (fpcnt) parameters of
// tf and classify its return type (rtype) for the simulator's blrt
// encoding.
static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): no break here, so FP args are counted in gps as
      // well — presumably intentional for the simulator's blrt
      // argument convention, but confirm; a missing break would skew
      // the gp count otherwise
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    // anything else (integral, pointer, ...) returns in a gp register
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
2689 
// Emit a volatile (acquire/release) access "INSN REG, [BASE]".  Such
// instructions only accept a bare base register, hence the guarantees
// that no index/scale/displacement addressing is in use.
//
// The macro deliberately declares _masm OUTSIDE the braces (and is not
// wrapped in do { } while (0)) so that enc_class bodies can issue
// further `__` instructions after invoking it.  The SCRATCH argument
// is currently unused by the expansion.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Pointer-to-member types for the MacroAssembler emitters passed to the
// loadStore() helpers below (integer, FP and SIMD-vector variants).
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2703 
2704   // Used for all non-volatile memory accesses.  The use of
2705   // $mem->opcode() to discover whether this pattern uses sign-extended
2706   // offsets is something of a kludge.
2707   static void loadStore(MacroAssembler masm, mem_insn insn,
2708                          Register reg, int opcode,
2709                          Register base, int index, int size, int disp)
2710   {
2711     Address::extend scale;
2712 
2713     // Hooboy, this is fugly.  We need a way to communicate to the
2714     // encoder that the index needs to be sign extended, so we have to
2715     // enumerate all the cases.
2716     switch (opcode) {
2717     case INDINDEXSCALEDOFFSETI2L:
2718     case INDINDEXSCALEDI2L:
2719     case INDINDEXSCALEDOFFSETI2LN:
2720     case INDINDEXSCALEDI2LN:
2721     case INDINDEXOFFSETI2L:
2722     case INDINDEXOFFSETI2LN:
2723       scale = Address::sxtw(size);
2724       break;
2725     default:
2726       scale = Address::lsl(size);
2727     }
2728 
2729     if (index == -1) {
2730       (masm.*insn)(reg, Address(base, disp));
2731     } else {
2732       if (disp == 0) {
2733         (masm.*insn)(reg, Address(base, as_Register(index), scale));
2734       } else {
2735         masm.lea(rscratch1, Address(base, disp));
2736         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
2737       }
2738     }
2739   }
2740 
  // FP-register variant of loadStore above; see that function for the
  // $mem->opcode() kludge.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // NOTE(review): unlike the integer version, this switch does not
    // list INDINDEXOFFSETI2L/INDINDEXOFFSETI2LN, so those forms would
    // get an lsl extend instead of sxtw.  Verify that FP memory operands
    // can never match the unscaled offset-I2L patterns.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // index plus displacement: materialize base+disp first
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
2769 
2770   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2771                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2772                          int opcode, Register base, int index, int size, int disp)
2773   {
2774     if (index == -1) {
2775       (masm.*insn)(reg, T, Address(base, disp));
2776     } else {
2777       assert(disp == 0, "unsupported address mode");
2778       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2779     }
2780   }
2781 
2782 %}
2783 
2784 
2785 
2786 //----------ENCODING BLOCK-----------------------------------------------------
2787 // This block specifies the encoding classes used by the compiler to
2788 // output byte streams.  Encoding classes are parameterized macros
2789 // used by Machine Instruction Nodes in order to generate the bit
2790 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
2794 // which returns its register number when queried.  CONST_INTER causes
2795 // an operand to generate a function which returns the value of the
2796 // constant when queried.  MEMORY_INTER causes an operand to generate
2797 // four functions which return the Base Register, the Index Register,
2798 // the Scale Value, and the Offset Value of the operand when queried.
2799 // COND_INTER causes an operand to generate six functions which return
2800 // the encoding code (ie - encoding bits for the instruction)
2801 // associated with each basic boolean condition for a conditional
2802 // instruction.
2803 //
2804 // Instructions specify two basic values for encoding.  Again, a
2805 // function is available to check if the constant displacement is an
2806 // oop. They use the ins_encode keyword to specify their encoding
2807 // classes (which must be a sequence of enc_class names, and their
2808 // parameters, specified in the encoding block), and they use the
2809 // opcode keyword to specify, in order, their primary, secondary, and
2810 // tertiary opcode.  Only the opcode sections which a particular
2811 // instruction needs for encoding need to be specified.
2812 encode %{
2813   // Build emit functions for each basic byte or larger field in the
2814   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2815   // from C++ code in the enc_class source block.  Emit functions will
2816   // live in the main source block for now.  In future, we can
2817   // generalize this by adding a syntax that specifies the sizes of
2818   // fields in an order, so that the adlc can build the emit functions
2819   // automagically
2820 
  // catch all for unimplemented encodings -- halts the VM with a
  // "C2 catch all" unimplemented message if an instruction lacking a
  // real encoding is ever emitted
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
2826 
  // BEGIN Non-volatile memory access
  //
  // Each encoding below forwards to one of the loadStore() helpers
  // defined earlier, passing the MacroAssembler member function that
  // emits the desired instruction together with the decomposed memory
  // operand ($mem base/index/scale/disp).  Some names appear twice with
  // different operand signatures (e.g. aarch64_enc_ldrb for iRegI and
  // iRegL destinations); ADLC disambiguates on the operand types.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD vector loads of S/D/Q (32/64/128-bit) values; these use the
  // mem_vector_insn overload of loadStore.

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store of an immediate zero -- uses the zero register as the source
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // SIMD vector stores of S/D/Q (32/64/128-bit) values

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3016 
3017   // END Non-volatile memory access
3018 
  // volatile loads and stores
  //
  // These use the AArch64 load-acquire (ldar*) and store-release
  // (stlr*) instructions via the MOV_VOLATILE macro, which only
  // supports a bare base-register address (no index/scale/disp).

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // There is no sign-extending load-acquire, so acquire the byte/half
  // with ldarb/ldarh and sign extend it afterwards (the `__` after the
  // macro works because MOV_VOLATILE leaves _masm in scope).
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // There is no FP load-acquire: acquire into a GP scratch register and
  // fmov the bits across to the FP destination.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // No FP store-release either: fmov the FP value to a GP scratch
  // register (in a nested scope so its _masm does not clash with the
  // one MOV_VOLATILE declares) and store-release that.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
3145 
  // synchronized read/update encodings

  // Load-acquire-exclusive of a 64-bit value.  ldaxr only takes a bare
  // base register, so any index/displacement is first folded into
  // rscratch1 with lea.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  // Store-release-exclusive.  stlxr writes 0 to rscratch1 on success,
  // non-zero on failure; the trailing cmpw leaves the condition flags
  // EQ iff the store succeeded, for consumption by the matching
  // conditional in the instruction pattern.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
3206 
  // 64-bit compare-and-exchange via an LL/SC (ldxr/stlxr) retry loop.
  // The effective address is first folded into addr_reg (exclusive
  // accesses only take a bare base register).  On exit the condition
  // flags are EQ iff the exchange succeeded: either the compare failed
  // (flags NE, branch to done), or the store-exclusive succeeded with
  // the earlier compare having set EQ.
  //
  // NOTE(review): the load side uses ldxr, which has no acquire
  // semantics; confirm the instruction patterns that use this encoding
  // emit whatever memory barriers the memory model requires.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxr(rscratch1, addr_reg);
    __ cmp(rscratch1, old_reg);
    __ br(Assembler::NE, done);
    __ stlxr(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);
    __ bind(done);
  %}

  // 32-bit variant of aarch64_enc_cmpxchg above (ldxrw/stlxrw and word
  // compares); same flag-setting contract on exit.
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    Register old_reg = as_Register($oldval$$reg);
    Register new_reg = as_Register($newval$$reg);
    Register base = as_Register($mem$$base);
    Register addr_reg;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        addr_reg = rscratch2;
      } else {
        // TODO
        // should we ever get anything other than this case?
        addr_reg = base;
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        addr_reg = rscratch2;
      }
    }
    Label retry_load, done;
    __ bind(retry_load);
    __ ldxrw(rscratch1, addr_reg);
    __ cmpw(rscratch1, old_reg);
    __ br(Assembler::NE, done);
    __ stlxrw(rscratch1, new_reg, addr_reg);
    __ cbnzw(rscratch1, retry_load);
    __ bind(done);
  %}
3284 
  // auxiliary used for CompareAndSwapX to set result register
  // (materializes the EQ flag left by the cmpxchg encodings above as
  // 1 on success / 0 on failure)
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
3291 
  // prefetch encodings

  // Prefetch for store (PSTL1KEEP hint) at the given memory operand.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
      // NOTE(review): the nop presumably pads this path to the same
      // size as the two-instruction indexed paths below -- confirm the
      // instruction's declared size requires it.
      __ nop();
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
3313 
  // Zero a word-aligned region of cnt words starting at base, using an
  // 8-way unrolled loop entered Duff's-device style at the offset that
  // handles the cnt % 8 remainder first.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= tmp1; cnt is now a multiple of unroll
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // computed entry: step back tmp1 instructions (4 bytes each, hence
    // LSL 2) from `entry` so exactly tmp1 of the unrolled strs execute
    // on the first pass
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
3362 
  /// mov encodings
3364 
  // 32-bit immediate move; zero gets a register-to-register move from
  // zr rather than a materialized constant.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    MacroAssembler _masm(&cbuf);
    u_int32_t con = (u_int32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; same zero-register special case.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    u_int64_t con = (u_int64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
3386 
3387   enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
3388     MacroAssembler _masm(&cbuf);
3389     Register dst_reg = as_Register($dst$$reg);
3390     address con = (address)$src$$constant;
3391     if (con == NULL || con == (address)1) {
3392       ShouldNotReachHere();
3393     } else {
3394       relocInfo::relocType rtype = $src->constant_reloc();
3395       if (rtype == relocInfo::oop_type) {
3396         __ movoop(dst_reg, (jobject)con, /*immediate*/true);
3397       } else if (rtype == relocInfo::metadata_type) {
3398         __ mov_metadata(dst_reg, (Metadata*)con);
3399       } else {
3400         assert(rtype == relocInfo::none, "unexpected reloc type");
3401         if (con < (address)(uintptr_t)os::vm_page_size()) {
3402           __ mov(dst_reg, con);
3403         } else {
3404           unsigned long offset;
3405           __ adrp(dst_reg, con, offset);
3406           __ add(dst_reg, dst_reg, offset);
3407         }
3408       }
3409     }
3410   %}
3411 
3412   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
3413     MacroAssembler _masm(&cbuf);
3414     Register dst_reg = as_Register($dst$$reg);
3415     __ mov(dst_reg, zr);
3416   %}
3417 
3418   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
3419     MacroAssembler _masm(&cbuf);
3420     Register dst_reg = as_Register($dst$$reg);
3421     __ mov(dst_reg, (u_int64_t)1);
3422   %}
3423 
3424   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
3425     MacroAssembler _masm(&cbuf);
3426     address page = (address)$src$$constant;
3427     Register dst_reg = as_Register($dst$$reg);
3428     unsigned long off;
3429     __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
3430     assert(off == 0, "assumed offset == 0");
3431   %}
3432 
3433   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
3434     MacroAssembler _masm(&cbuf);
3435     address page = (address)$src$$constant;
3436     Register dst_reg = as_Register($dst$$reg);
3437     unsigned long off;
3438     __ adrp(dst_reg, ExternalAddress(page), off);
3439     assert(off == 0, "assumed offset == 0");
3440   %}
3441 
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    // A zero narrow oop is matched by aarch64_enc_mov_n0 instead, so the
    // constant must be a real oop here.
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      // Emit the compressed oop together with its oop relocation.
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
3454 
3455   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
3456     MacroAssembler _masm(&cbuf);
3457     Register dst_reg = as_Register($dst$$reg);
3458     __ mov(dst_reg, zr);
3459   %}
3460 
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    // A null klass constant is never expected here.
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      // Emit the compressed klass pointer together with its relocation.
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
3473 
3474   // arithmetic encodings
3475 
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    // AArch64 add/sub immediates are unsigned, so fold the sign of the
    // (possibly negated) constant into the choice of opcode.
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
3489 
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    // immLAddSub presumably restricts the constant to the add/sub
    // immediate range, making the narrowing cast value-preserving --
    // TODO confirm against the operand definition (not in this chunk).
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
3503 
3504   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
3505     MacroAssembler _masm(&cbuf);
3506    Register dst_reg = as_Register($dst$$reg);
3507    Register src1_reg = as_Register($src1$$reg);
3508    Register src2_reg = as_Register($src2$$reg);
3509     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3510   %}
3511 
3512   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3513     MacroAssembler _masm(&cbuf);
3514    Register dst_reg = as_Register($dst$$reg);
3515    Register src1_reg = as_Register($src1$$reg);
3516    Register src2_reg = as_Register($src2$$reg);
3517     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3518   %}
3519 
3520   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3521     MacroAssembler _masm(&cbuf);
3522    Register dst_reg = as_Register($dst$$reg);
3523    Register src1_reg = as_Register($src1$$reg);
3524    Register src2_reg = as_Register($src2$$reg);
3525     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3526   %}
3527 
3528   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3529     MacroAssembler _masm(&cbuf);
3530    Register dst_reg = as_Register($dst$$reg);
3531    Register src1_reg = as_Register($src1$$reg);
3532    Register src2_reg = as_Register($src2$$reg);
3533     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3534   %}
3535 
3536   // compare instruction encodings
3537 
3538   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
3539     MacroAssembler _masm(&cbuf);
3540     Register reg1 = as_Register($src1$$reg);
3541     Register reg2 = as_Register($src2$$reg);
3542     __ cmpw(reg1, reg2);
3543   %}
3544 
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    // AArch64 add/sub immediates are unsigned: compare against a negative
    // constant by adding its negation.  Writing to zr discards the
    // arithmetic result, leaving only the flags.
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}
3555 
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int32_t val = (u_int32_t)$src2$$constant;
    // General immediate compare: materialise the constant in rscratch1
    // and do a register-register compare.
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
3563 
3564   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3565     MacroAssembler _masm(&cbuf);
3566     Register reg1 = as_Register($src1$$reg);
3567     Register reg2 = as_Register($src2$$reg);
3568     __ cmp(reg1, reg2);
3569   %}
3570 
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    // As for the 32-bit form: fold the sign of the constant into the
    // choice of subs/adds; zr discards the arithmetic result.
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // Long.MIN_VALUE is its own negation (val == -val, and val == 0 is
      // handled above): materialise it and use a register compare.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
3585 
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg1 = as_Register($src1$$reg);
    u_int64_t val = (u_int64_t)$src2$$constant;
    // General immediate compare: materialise the constant in rscratch1
    // and do a register-register compare.
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
3593 
3594   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3595     MacroAssembler _masm(&cbuf);
3596     Register reg1 = as_Register($src1$$reg);
3597     Register reg2 = as_Register($src2$$reg);
3598     __ cmp(reg1, reg2);
3599   %}
3600 
3601   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3602     MacroAssembler _masm(&cbuf);
3603     Register reg1 = as_Register($src1$$reg);
3604     Register reg2 = as_Register($src2$$reg);
3605     __ cmpw(reg1, reg2);
3606   %}
3607 
3608   enc_class aarch64_enc_testp(iRegP src) %{
3609     MacroAssembler _masm(&cbuf);
3610     Register reg = as_Register($src$$reg);
3611     __ cmp(reg, zr);
3612   %}
3613 
3614   enc_class aarch64_enc_testn(iRegN src) %{
3615     MacroAssembler _masm(&cbuf);
3616     Register reg = as_Register($src$$reg);
3617     __ cmpw(reg, zr);
3618   %}
3619 
3620   enc_class aarch64_enc_b(label lbl) %{
3621     MacroAssembler _masm(&cbuf);
3622     Label *L = $lbl$$label;
3623     __ b(*L);
3624   %}
3625 
3626   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
3627     MacroAssembler _masm(&cbuf);
3628     Label *L = $lbl$$label;
3629     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3630   %}
3631 
3632   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
3633     MacroAssembler _masm(&cbuf);
3634     Label *L = $lbl$$label;
3635     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3636   %}
3637 
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     // Slow-path subtype check via check_klass_subtype_slow_path, which
     // branches to miss on failure and falls through on success.  When
     // primary == 1 the result register is cleared on the success path.
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3655 
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    // Emit a (trampolined) direct call; the relocation type depends on
    // what kind of target is being called.
    address addr = (address)$meth$$method;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      // A statically-bound virtual call.
      __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }

    if (_method) {
      // Emit stub for static call
      CompiledStaticCall::emit_to_interp_stub(cbuf);
    }
  %}
3674 
3675   enc_class aarch64_enc_java_dynamic_call(method meth) %{
3676     MacroAssembler _masm(&cbuf);
3677     __ ic_call((address)$meth$$method);
3678   %}
3679 
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack.
      // Not implemented on AArch64 yet.
      __ call_Unimplemented();
    }
  %}
3687 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: a trampolined call can reach it.
      __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
    } else {
      // Native entry outside the code cache: describe the C calling
      // convention (GP/FP argument counts, return type) for blrt.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3714 
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    // Jump (not call) to the rethrow stub; far_jump handles targets that
    // may be out of plain branch range.
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3719 
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    // Return to the address in the link register.
    __ ret(lr);
  %}
3724 
3725   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3726     MacroAssembler _masm(&cbuf);
3727     Register target_reg = as_Register($jump_target$$reg);
3728     __ br(target_reg);
3729   %}
3730 
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    // Indirect jump used when forwarding an exception to a handler.
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
3740 
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    // Fast-path monitor enter.  On exit the condition flags encode the
    // result: EQ = locked on the fast path, NE = must call the runtime.
    // See the matching aarch64_enc_fast_unlock below.
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is non-null here, so this sets NE and forces the slow path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking) {
      __ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont);
    }

    // Handle existing monitor
    if (EmitSync & 0x02) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    // Note that this is simply a CAS: it does not generate any
    // barriers.  These are separately generated by
    // membar_acquire_lock().
    {
      Label retry_load;
      __ bind(retry_load);
      __ ldxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // stlxr writes 0 on success; the flags still hold EQ from the cmp.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, have now locked it, and will continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
3885 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    // Fast-path monitor exit.  On exit the condition flags encode the
    // result: EQ = unlocked on the fast path, NE = must call the runtime.
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // stlxr writes 0 on success; the flags still hold EQ from the cmp.
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // The cmp establishes the flags consumed at cont; cbnz itself does
      // not modify them.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3978 
3979 %}
3980 
3981 //----------FRAME--------------------------------------------------------------
3982 // Definition of frame structure and management information.
3983 //
3984 //  S T A C K   L A Y O U T    Allocators stack-slot number
3985 //                             |   (to get allocators register number
3986 //  G  Owned by    |        |  v    add OptoReg::stack0())
3987 //  r   CALLER     |        |
3988 //  o     |        +--------+      pad to even-align allocators stack-slot
3989 //  w     V        |  pad0  |        numbers; owned by CALLER
3990 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3991 //  h     ^        |   in   |  5
3992 //        |        |  args  |  4   Holes in incoming args owned by SELF
3993 //  |     |        |        |  3
3994 //  |     |        +--------+
3995 //  V     |        | old out|      Empty on Intel, window on Sparc
3996 //        |    old |preserve|      Must be even aligned.
3997 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3998 //        |        |   in   |  3   area for Intel ret address
3999 //     Owned by    |preserve|      Empty on Sparc.
4000 //       SELF      +--------+
4001 //        |        |  pad2  |  2   pad to align old SP
4002 //        |        +--------+  1
4003 //        |        | locks  |  0
4004 //        |        +--------+----> OptoReg::stack0(), even aligned
4005 //        |        |  pad1  | 11   pad to align new SP
4006 //        |        +--------+
4007 //        |        |        | 10
4008 //        |        | spills |  9   spills
4009 //        V        |        |  8   (pad0 slot for callee)
4010 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4011 //        ^        |  out   |  7
4012 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4013 //     Owned by    +--------+
4014 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4015 //        |    new |preserve|      Must be even-aligned.
4016 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4017 //        |        |        |
4018 //
4019 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4020 //         known from SELF's arguments and the Java calling convention.
4021 //         Region 6-7 is determined per call site.
4022 // Note 2: If the calling convention leaves holes in the incoming argument
4023 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
4025 //         incoming area, as the Java calling convention is completely under
4026 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4028 //         varargs C calling conventions.
4029 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4030 //         even aligned with pad0 as needed.
4031 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4032 //           (the latter is true on Intel but is it false on AArch64?)
4033 //         region 6-11 is even aligned; it may be padded out more so that
4034 //         the region from SP to FP meets the minimum stack alignment.
4035 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4036 //         alignment.  Region 11, pad1, may be dynamically extended so that
4037 //         SP meets the minimum alignment.
4038 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Integral, pointer and narrow values return in r0 (with r0_H for
    // the high halves of wide values); floats and doubles return in v0.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
4142 
4143 //----------ATTRIBUTES---------------------------------------------------------
4144 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
// Default attribute values; individual instruct rules may override them.
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
4160 
4161 //----------OPERANDS-----------------------------------------------------------
4162 // Operand definitions must precede instruction definitions for correct parsing
4163 // in the ADLC because operands constitute user defined types which are used in
4164 // instruction definitions.
4165 
4166 //----------Simple Operands----------------------------------------------------
4167 
4168 // Integer operands 32 bit
4169 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4 (note: no lower bound)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4231 
// Each of the following operands matches one specific 32 bit constant,
// named by its value.
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4321 
// 64 bit constant 63 (long shift-count mask)
// Fixed: the operand is named immL_* (a long immediate) but previously
// tested n->get_int() and matched ConI, inconsistent with the sibling
// immL_65535 below which tests get_long() and matches ConL.
operand immL_63()
%{
  predicate(n->get_long() == 63L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4331 
// 64 bit constant 255 (long low-byte mask)
// Fixed: previously tested n->get_int() and matched ConI despite the
// immL_* name; brought in line with immL_65535 (get_long()/ConL).
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4341 
// 64 bit constant 65535 (long low-halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (long low-word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit contiguous low-order bit mask: a value of the form 2^k - 1
// whose top two bits are clear (per the predicate below)
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit contiguous low-order bit mask: a value of the form 2^k - 1
// whose top two bits are clear (per the predicate below)
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4383 
// Scale values for scaled offset addressing modes (up to long but not quad)
// i.e. shift amounts 0..3 selecting a 1/2/4/8 byte element scale
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, expressed as a long constant
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4437 
// Offset for scaled or unscaled immediate loads and stores
// (validity is delegated to the Address helper in the assembler)
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variant of immIOffset
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
// (validity check delegated to the assembler's encoding rules)
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4480 
// Integer operands 64 bit
// 64 bit immediate -- any jlong constant
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Byte offset of the last_Java_pc slot within the thread's frame anchor,
// expressed as a long constant (frame_anchor_offset + last_Java_pc_offset)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
// (validity check delegated to the assembler's encoding rules)
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4567 
// Pointer operands
// Pointer Immediate -- any pointer constant
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate -- the address of the VM's polling page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base -- the base address of the card-mark byte map
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// NOTE(review): comment copied from immP_M1 ("write the current PC to the
// thread anchor") -- confirm the actual use of -2 at its match rules
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4649 
// Float and Double operands
// Double Immediate -- any double constant
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: a double constant that passes the assembler's
// float-immediate validity check (i.e. is encodable in an immediate field)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate -- any float constant
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: a float constant that passes the assembler's
// float-immediate validity check (checked after widening to double)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4710 
// Narrow pointer operands
// Narrow Pointer Immediate -- any compressed-oop constant
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Immediate -- any compressed-klass constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4741 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4775 
// Integer 64 bit Register not Special
// Fixed: added the op_cost(0) that every sibling register operand
// (iRegINoSp, iRegPNoSp, iRegNNoSp, ...) declares, for consistency.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4784 
// Pointer Register Operands
// Pointer Register -- any pointer register, including subsumed variants
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4829 
// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4923 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4968 
4969 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5002 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64 bit vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5082 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// (same int_flags register class as rFlagsReg; the distinction only
// affects which condition-code operands consumers pair it with)
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
5122 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Interpreter Method Oop Register
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5164 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER blocks below, index(0xffffffff) marks "no index
// register" and scale(0x0)/disp(0x0) mark "no scale"/"no displacement".

// [base] -- simple register-indirect
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + (long index << scale)] + int offset
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + (long index << scale)] + long offset
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + sign-extended int index] + long offset
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// [base + (sign-extended int index << scale)] + long offset
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + (sign-extended int index << scale)]
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + (long index << scale)]
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + long index]
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + int offset]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base + long offset]
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5306 
5307 
// Narrow-oop variants of the addressing modes above. All of them require
// Universe::narrow_oop_shift() == 0 so that the decoded base is the raw
// narrow value.

// [decoded base] -- register-indirect through a narrow oop
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [decoded base + (long index << scale)] + int offset
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [decoded base + (long index << scale)] + long offset
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [decoded base + sign-extended int index] + long offset
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// [decoded base + (sign-extended int index << scale)] + long offset
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [decoded base + (sign-extended int index << scale)]
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [decoded base + (long index << scale)]
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [decoded base + long index]
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [decoded base + int offset]
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [decoded base + long offset]
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5457 
5458 
5459 
// AArch64 opto stubs need to write to the pc slot in the thread anchor
// (addressing mode: [thread register + immL_pc_off constant])
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
5474 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// NOTE(review): the "// RSP" comments on base(0x1e) below appear to be
// inherited from the x86 version of this file -- confirm 0x1e is the
// intended stack-pointer encoding on AArch64.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding an int value
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a float value
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double value
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a long value
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5549 
5550 // Operands for expressing Control Flow
5551 // NOTE: Label is a predefined operand which should not be redefined in
5552 //       the AD file. It is generically handled within the ADLC.
5553 
5554 //----------Conditional Branch Operands----------------------------------------
5555 // Comparison Op  - This is the operation of the comparison, and is limited to
5556 //                  the following set of codes:
5557 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5558 //
5559 // Other attributes of the comparison, such as unsignedness, are specified
5560 // by the comparison instruction that sets a condition code flags register.
5561 // That result is represented by a flags operand whose subtype is appropriate
5562 // to the unsignedness (etc.) of the comparison.
5563 //
5564 // Later, the instruction which matches both the Comparison Op (a Bool) and
5565 // the flags (produced by the Cmp) specifies the coding of the comparison op
5566 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5567 
// used for signed integral comparisons and fp comparisons
// The hex values are the native AArch64 condition-code encodings
// (eq=0x0, ne=0x1, ..., lt=0xb, gt=0xc, le=0xd).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5586 
// used for unsigned integral comparisons
// Same as cmpOp but the ordered relations map to the unsigned
// condition codes (lo/hs/ls/hi) instead of lt/ge/le/gt.

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5605 
// Special operand allowing long args to int ops to be truncated for free
// Matches (ConvL2I reg) at zero cost: a 32-bit instruction consuming
// this operand simply reads the low half of the long register.

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
5618 
5619 opclass vmem(indirect, indIndex, indOffI, indOffL);
5620 
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
// The first row lists the plain-pointer addressing modes; the second
// row lists the corresponding narrow-oop (compressed pointer) forms.

opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
5633 
5634 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but it's not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
5649 
5650 //----------PIPELINE-----------------------------------------------------------
5651 // Rules which define the behavior of the target architectures pipeline.
5652 // Integer ALU reg operation
pipeline %{

attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}

// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

pipe_desc(ISS, EX1, EX2, WR);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written at EX2 but the ALU resource is booked
// at EX1 — confirm the EX1 booking is intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}

//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-imm
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// "dst" here names the address base register (read), not a result.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}

%}
6096 //----------INSTRUCTIONS-------------------------------------------------------
6097 //
6098 // match      -- States which machine-independent subtree may be replaced
6099 //               by this instruction.
6100 // ins_cost   -- The estimated cost of this instruction is used by instruction
6101 //               selection to identify a minimum cost tree of machine
6102 //               instructions that matches a tree of machine-independent
6103 //               instructions.
6104 // format     -- A string providing the disassembly for this instruction.
6105 //               The value of an instruction's operand may be inserted
6106 //               by referring to it with a '$' prefix.
6107 // opcode     -- Three instruction opcodes may be provided.  These are referred
6108 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6110 //               indicate the type of machine instruction, while secondary
6111 //               and tertiary are often used for prefix options or addressing
6112 //               modes.
6113 // ins_encode -- A list of encode classes with parameters. The encode class
6114 //               name must have been defined in an 'enc_class' specification
6115 //               in the encode section of the architecture description.
6116 
6117 // ============================================================================
6118 // Memory (Load/Store) Instructions
6119 
6120 // Load Instructions
6121 
// The predicate on each plain load rejects loads needing acquire
// semantics; those are matched by separate (volatile) rules.

// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6233 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 0xFFFFFFFF mask is the zero-extension idiom;
// a plain 32-bit ldrw already zero-extends, so it folds to one load.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6275 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed: disassembly annotation previously read "# int" for this
  // 64-bit load (copy-paste from loadI).
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6289 
// Load Range
// (array length; no acquiring predicate — array lengths are immutable)
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
6386 
6387 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// (costed higher: general 64-bit pointer may need multiple movs)

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
6443 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed: annotation previously read "# NULL ptr", a copy-paste from
  // loadConP0; this rule materializes the constant-one pointer.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6457 
// Load Poll Page Constant
// (adr — PC-relative address of the safepoint polling page)

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant
// (card table base for the card-marking write barrier)

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
6527 
// Load Packed Float Constant
// immFPacked: constants encodable in fmov's 8-bit immediate form,
// so no constant-table load is needed.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}

// Load Float Constant
// General case: load from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}
6571 
// Load Double Constant
// General case: load from the constant table.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed: annotation previously read "float=$con" (copy-paste from
  // loadConF); this rule loads a double constant.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}
6588 
// Store Instructions
// The predicate on each plain store rejects stores needing release
// semantics; those are matched by separate (volatile) rules.
// Imm-zero forms store the hardwired zero register (zr) directly.

// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Byte Immediate Zero
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short Immediate Zero
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer Immediate Zero
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
6686 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: disassembly annotation previously read "# int" for this
  // 64-bit store (copy-paste from storeI).
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long Immediate Zero (64 bit)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: annotation previously read "# int" (see storeL above).
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
6714 
// Store Pointer
// Plain (non-release) 64-bit store of an uncompressed oop/pointer.
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
// NULL-pointer store via the zero register.
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
// Narrow oops are 32 bits wide, hence strw.
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store of narrow-oop zero: when both the compressed-oop and
// compressed-klass bases are NULL, rheapbase holds zero (see format
// comment), so it can be stored directly instead of loading a constant.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
6771 
// Store Float
// 32-bit FP store from a vector/FP register.
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// 64-bit FP store from a vector/FP register.
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
// Narrow klass pointers are 32 bits wide, hence strw.
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

// Write prefetch used when bumping the allocation pointer (PrefetchAllocation).
instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
6833 
6834 //  ---------------- volatile loads and stores ----------------
6835 
// Load Byte (8 bit signed)
// Volatile loads use load-acquire (ldar*) instructions, which only take a
// base-register address, hence the indirect (not memory) operand.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char/Short (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
6925 
// Load Short/Char (16 bit signed) into long
// Volatile acquiring load with sign extension to 64 bits.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // was "ldarh": the format now matches the sign-extending encoding
  // (aarch64_enc_ldarsh) actually emitted for this rule
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
6938 
// Load Integer (32 bit signed)
// Volatile 32-bit acquiring load.
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// Matches the idiom (long)i & 0xFFFFFFFF; ldarw zero-extends, so the
// AndL with the 32-bit mask needs no extra instruction.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
6964 
// Load Long (64 bit signed)
// Volatile 64-bit acquiring load.
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // was "# int": the format comment mislabelled this 64-bit load
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
6977 
// Load Pointer
// Volatile 64-bit acquiring load of an uncompressed oop/pointer.
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
// Volatile 32-bit acquiring load of a narrow oop.
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// Volatile acquiring load into an FP register (via aarch64_enc_fldars).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
// Volatile acquiring load into an FP register (via aarch64_enc_fldard).
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7029 
// Store Byte
// Volatile stores use store-release (stlr*) instructions; like the
// acquiring loads above they only accept a base-register address.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7069 
// Store Long (64 bit signed)
// Volatile 64-bit releasing store.
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // was "# int": the format comment mislabelled this 64-bit store
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7082 
// Store Pointer
// Volatile 64-bit releasing store of an uncompressed oop/pointer.
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
// Volatile 32-bit releasing store of a narrow oop.
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// Volatile releasing store from an FP register (via aarch64_enc_fstlrs).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
// Volatile releasing store from an FP register (via aarch64_enc_fstlrd).
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7137 
7138 //  ---------------- end of volatile loads and stores ----------------
7139 
7140 // ============================================================================
7141 // BSWAP Instructions
7142 
// Byte-swap a 32-bit value with a single revw.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-swap a 64-bit value with a single rev.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-swap an unsigned 16-bit value; rev16w swaps bytes within each
// halfword, which is sufficient for the unsigned case.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Byte-swap a signed 16-bit value: rev16w, then sbfmw to sign-extend
// bits 0..15 into the full 32-bit register.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7196 
7197 // ============================================================================
7198 // Zero Count Instructions
7199 
// Count leading zeros of a 32-bit value.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros of a 64-bit value (result is an int).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros: AArch64 has no ctz instruction, so bit-reverse
// (rbitw) then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// 64-bit variant of the rbit+clz trailing-zero-count idiom.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7251 
7252 //---------- Population Count Instructions -------------------------------------
7253 //
7254 
// Population count via the SIMD cnt instruction: move the value to an FP
// register, count set bits per byte (cnt 8B), sum the bytes (addv), and
// move the result back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes the $src register without declaring an
    // effect(USE_KILL src) on the operand - confirm this is intended.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form: load the int straight into the FP register (ldrs)
// and run the same cnt/addv sequence.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // Emit the load via loadStore so all decoded memory-operand fields
    // (base, index, scale, disp) are honoured.
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form of popCountL: load the long with ldrd, then
// cnt/addv as above.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7341 
7342 // ============================================================================
7343 // MemBar Instruction
7344 
// LoadFence: order prior loads before subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided acquire barrier: when the unnecessary_acquire predicate proves
// the preceding acquiring load already provides the ordering, emit only
// a block comment (zero cost, no code).
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier for the cases the predicate above cannot elide.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Acquire barrier paired with monitor entry.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: order prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided release barrier: the following releasing store supplies the
// ordering (see unnecessary_release).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier for the non-elidable cases.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Store-store barrier (e.g. for final-field publication).
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Release barrier paired with monitor exit.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}

  ins_pipe(pipe_serial);
%}

// Elided volatile (full) barrier when unnecessary_volatile proves the
// surrounding acquire/release instructions already order the accesses.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full StoreLoad barrier; deliberately priced very high so the matcher
// strongly prefers the elided form above when legal.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
7486 
7487 // ============================================================================
7488 // Cast/Convert Instructions
7489 
// Reinterpret a long as a pointer; a register move, elided entirely when
// source and destination allocate to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; move elided when registers coincide.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7532 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // With a zero narrow-oop shift the compressed form equals the low 32
  // bits of the address, so a 32-bit register move is all that is needed.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // was "mov dst, $src": the operand lacked its '$' (so the debug
  // listing printed the literal word "dst") and the mnemonic did not
  // match the movw actually emitted below
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7548 
7549 
// Convert oop pointer into compressed form
// Possibly-null encode: encode_heap_oop handles the NULL case, and the
// flags register is clobbered (KILL cr).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null encode: skips the NULL check.
// NOTE(review): cr is declared but no effect(KILL cr) is given here,
// unlike encodeHeapOop above - confirm whether the operand is needed.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Possibly-null decode of a narrow oop back to a full pointer.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Not-null (or constant) decode: skips the NULL check.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7603 
7604 // n.b. AArch64 implementations of encode_klass_not_null and
7605 // decode_klass_not_null do not modify the flags register so, unlike
7606 // Intel, we don't kill CR as a side effect here
7607 
// Compress a klass pointer (never null); no flags clobber, see the
// comment above.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer (never null); the macro assembler
// provides a distinct in-place form used when dst and src coincide.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
7641 
// CheckCastPP is a type-system-only node: size(0) means no machine code
// is emitted; the operand is reused in place.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP likewise emits no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII likewise emits no code (and is explicitly zero-cost).
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
7672 
7673 // ============================================================================
7674 // Atomic operation instructions
7675 //
7676 // Intel and SPARC both implement Ideal Node LoadPLocked and
7677 // Store{PIL}Conditional instructions using a normal load for the
7678 // LoadPLocked and a CAS for the Store{PIL}Conditional.
7679 //
7680 // The ideal code appears only to use LoadPLocked/StorePLocked as a
7681 // pair to lock object allocations from Eden space when not using
7682 // TLABs.
7683 //
7684 // There does not appear to be a Load{IL}Locked Ideal Node and the
7685 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
7686 // and to use StoreIConditional only for 32-bit and StoreLConditional
7687 // only for 64-bit.
7688 //
7689 // We implement LoadPLocked and StorePLocked instructions using,
7690 // respectively the AArch64 hw load-exclusive and store-conditional
7691 // instructions. Whereas we must implement each of
7692 // Store{IL}Conditional using a CAS which employs a pair of
7693 // instructions comprising a load-exclusive followed by a
7694 // store-conditional.
7695 
7696 
// Locked-load (linked load) of the current heap-top
// used when updating the eden heap top
// implemented using ldaxr on AArch64
// Pairs with storePConditional below (load-exclusive / store-conditional).
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
7713 
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.
// The oldval operand is implicit: success depends on the exclusive
// monitor set by the preceding loadPLocked, not on a value compare.

instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  // NOTE(review): the two format strings concatenate without "\n\t", so
  // the debug listing prints them as one line - confirm intended.
  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
7738 
// this has to be implemented as a CAS
// StoreLConditional: compare-and-swap on a long; only the flags result
// (EQ on success) is consumed by the matcher.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// this has to be implemented as a CAS
// StoreIConditional: 32-bit variant of the above.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
7772 
7773 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
7774 // can't match them
7775 
// 32-bit CAS: cmpxchgw then cset materialises the boolean result in res.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// 64-bit CAS; result is still an int (0/1).
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS (64-bit exchange).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS (32-bit exchange).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
7843 
7844 
// Atomic exchange, int: $prev receives the value previously held at
// [$mem]; $newv is stored unconditionally.  Delegates to
// MacroAssembler::atomic_xchgw (word-sized).
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, long (doubleword atomic_xchg).
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, narrow oop: compressed oops are word-sized so the
// word form atomic_xchgw is used.
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, pointer (full-width oop).
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
7880 
7881 
// Atomic fetch-and-add, long: $newval receives the value previously
// held at [$mem] and [$mem] is incremented by $incr.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is unused (result_not_used()), so no
// result register is written (noreg).  Slightly cheaper ins_cost so
// the matcher prefers this rule when the predicate holds.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add with an add/sub-range immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment long add-and-discard variant.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic fetch-and-add, int (word-sized atomic_addw).
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int add-and-discard variant (result_not_used()).
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Int fetch-and-add with an add/sub-range immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment int add-and-discard variant.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
7965 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// cmp sets the flags, csetw produces 0 (EQ) or 1 (NE), and cnegw
// negates the result to -1 when the comparison was LT.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
7988 
// Manifest a CmpL result in an integer register when the second
// operand is an add/sub-range immediate:
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative constant is folded into an adds of its negation so
    // the immediate stays within the add/sub immediate encoding
    // accepted by immLAddSub.  Only the flags are needed, so the
    // result register is zr.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
8013 
8014 // ============================================================================
8015 // Conditional Move Instructions
8016 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
8026 
// CMoveI, signed compare: $dst <- ($cmp ? $src2 : $src1).  Note the
// operand order passed to cselw: src2 is selected when the condition
// holds, matching C2's CMove (cond ? src2 : src1) convention.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// CMoveI, unsigned compare flavour (cmpOpU/rFlagsRegU); identical
// encoding, only the operand typing differs (see n.b. comment above).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8058 
8059 // special cases where one arg is zero
8060 
8061 // n.b. this is selected in preference to the rule above because it
8062 // avoids loading constant 0 into a source register
8063 
8064 // TODO
8065 // we ought only to be able to cull one of these variants as the ideal
8066 // transforms ought always to order the zero consistently (to left/right?)
8067 
// CMoveI with a zero operand on the left: use zr directly instead of
// materializing the constant 0 in a register.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour of cmovI_zero_reg
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveI with a zero operand on the right: zr is the selected-when-true
// input of cselw.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour of cmovI_reg_zero
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8131 
8132 // special case for creating a boolean 0 or 1
8133 
8134 // n.b. this is selected in preference to the rule above because it
8135 // avoids loading constants 0 and 1 into a source register
8136 
// Materialize a boolean (cond ? 1 : 0) with a single csincw of zr:
// csincw dst, zr, zr, cond gives 0 when cond holds, 1 otherwise,
// which matches CMove (cond ? one : zero) with the operand swap.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// unsigned flavour of cmovI_reg_zero_one
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8174 
// CMoveL, signed compare: 64-bit csel, src2 selected when the
// condition holds (same convention as cmovI_reg_reg above).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned flavour of cmovL_reg_reg
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8206 
8207 // special cases where one arg is zero
8208 
// CMoveL with zero on the right: select zr when the condition holds.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour of cmovL_reg_zero
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveL with zero on the left: select $src when the condition holds.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour of cmovL_zero_reg
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8272 
// CMoveP, signed compare: pointer-width csel, same operand convention
// as the int/long variants above.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned flavour of cmovP_reg_reg
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8304 
8305 // special cases where one arg is zero
8306 
// CMoveP with null (immP0) on the right: select zr when the condition
// holds, avoiding materialization of the null constant.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour of cmovP_reg_zero
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveP with null on the left: select $src when the condition holds.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour of cmovP_zero_reg
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8370 
// CMoveN, signed compare: compressed oops are word-sized, so the word
// form cselw is used.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8386 
// CMoveN, unsigned compare flavour of cmovN_reg_reg above.
// (The format comment previously said "signed"; this is the cmpOpU
// rule, so it is labelled "unsigned" like its cmovUI/cmovUL/cmovUP
// siblings.)
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8402 
8403 // special cases where one arg is zero
8404 
// CMoveN with narrow null (immN0) on the right: select zr when the
// condition holds.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour of cmovN_reg_zero
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// CMoveN with narrow null on the left: select $src when the condition
// holds.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned flavour of cmovN_zero_reg
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8468 
// CMoveF, signed compare: conditional select between two float
// registers via fcsels; src2 selected when the condition holds.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}

// unsigned flavour of cmovF_reg
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8504 
// CMoveD, signed compare: conditional select between two double
// registers via fcseld; src2 selected when the condition holds.
// (Format comment fixed: this is the double rule, not float.)
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8522 
// CMoveD, unsigned compare flavour of cmovD_reg above.
// (Format comment fixed: this is the double rule, not float.)
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
8540 
8541 // ============================================================================
8542 // Arithmetic Instructions
8543 //
8544 
8545 // Integer Addition
8546 
8547 // TODO
8548 // these currently employ operations which do not set CR and hence are
8549 // not flagged as killing CR but we would like to isolate the cases
8550 // where we want to set flags from those where we don't. need to work
8551 // out how to do that.
8552 
// Integer (32-bit) register-register add.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Integer add with an add/sub-range immediate.  The shared
// aarch64_enc_addsubw_imm encoder handles both add and sub; the opcode
// selects which (0 == add, 1 == sub).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As addI_reg_imm but matching through a ConvL2I of the register input
// (the low 32 bits of the long are used directly by addw).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8595 
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus a sign-extended int offset: fold the ConvI2L into the
// add's sxtw extension.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus a scaled long index: fold the shift into the address
// via lea with an lsl-scaled register offset.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus a scaled, sign-extended int index: fold both the
// ConvI2L and the shift into an sxtw-scaled address mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
8656 
// (ConvI2L src) << scale implemented as a single sbfiz: sign-extend
// the low bits of $src and insert them at bit position $scale.  The
// width argument is clamped to 32 since only 32 bits of the int source
// are significant.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
8671 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8688 
// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition.  No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8720 
// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// Shares the add/sub immediate encoder with addI_reg_imm; opcode 0x1
// selects the subtract form.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8751 
// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8768 
// Long Immediate Subtraction.  No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  // dst = src1 - src2 for an add/sub-encodable immediate (immLAddSub).
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // Fixed: format string was "sub$dst" -- missing the space between the
  // mnemonic and the first operand in the debug listing.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
8783 
8784 // Integer Negation (special case for sub)
8785 
// 32-bit negation: dst = -src, matched from (0 - src) and emitted as NEGW.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8799 
8800 // Long Negation
8801 
// 64-bit negation: dst = -src, matched from (0 - src) and emitted as NEG.
// NOTE(review): src is declared iRegIorL2I although this is a long
// operation (SubL); an iRegL operand would be expected -- confirm
// against the upstream aarch64.ad.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8815 
8816 // Integer Multiply
8817 
// 32-bit multiply: dst = src1 * src2 (MULW).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
8832 
// 32x32->64-bit signed multiply: matches a long multiply of two
// sign-extended ints and emits a single SMULL instead of two sign
// extensions plus a 64-bit MUL.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
8847 
8848 // Long Multiply
8849 
// 64-bit multiply: dst = src1 * src2 (MUL).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
8864 
// High 64 bits of a signed 64x64->128-bit multiply (SMULH).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  // Fixed: dropped the stray trailing comma before the comment tab in
  // the debug format string.
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
8880 
8881 // Combined Integer Multiply & Add/Sub
8882 
// 32-bit multiply-add: dst = src3 + src1 * src2, emitted as MADDW.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format said "madd" but the encoding emits the w-form maddw;
  // the debug listing now matches the generated instruction.
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
8898 
// 32-bit multiply-subtract: dst = src3 - src1 * src2, emitted as MSUBW.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // Fixed: format said "msub" but the encoding emits the w-form msubw;
  // the debug listing now matches the generated instruction.
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
8914 
8915 // Combined Long Multiply & Add/Sub
8916 
// 64-bit multiply-add: dst = src3 + src1 * src2 (MADD).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
8932 
// 64-bit multiply-subtract: dst = src3 - src1 * src2 (MSUB).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
8948 
8949 // Integer Divide
8950 
// 32-bit signed division: dst = src1 / src2 (SDIVW, via the
// aarch64_enc_divw encoding class).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
8960 
// Sign-bit extraction: (src1 >> 31) >>> 31 collapses to a single
// logical shift right by 31, yielding 0 or 1.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
8970 
// Rounding step for signed power-of-two division:
// dst = src + ((src >> 31) >>> 31), i.e. src plus 1 when src is
// negative, folded into a single shifted-operand ADDW.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed: format omitted the shifted source operand; the instruction
  // emitted is addw dst, src, src, LSR #31.
  format %{ "addw $dst, $src, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
8984 
8985 // Long Divide
8986 
// 64-bit signed division: dst = src1 / src2 (SDIV, via the
// aarch64_enc_div encoding class).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
8996 
// Sign-bit extraction, 64-bit: (src1 >> 63) >>> 63 collapses to a
// single logical shift right by 63, yielding 0 or 1.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
9006 
// Rounding step for signed power-of-two division, 64-bit:
// dst = src + ((src >> 63) >>> 63), folded into a shifted-operand ADD.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed: format omitted the second source operand and the LSR shift
  // annotation; the instruction emitted is add dst, src, src, LSR #63.
  format %{ "add $dst, $src, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
9020 
9021 // Integer Remainder
9022 
// 32-bit remainder: computed as src1 - (src1 / src2) * src2 using
// SDIVW into rscratch1 followed by MSUBW (see aarch64_enc_modw).
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed: format string contained a stray '(' after the mnemonic,
  // producing an ill-formed debug listing.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9033 
9034 // Long Remainder
9035 
// 64-bit remainder: computed as src1 - (src1 / src2) * src2 using
// SDIV into rscratch1 followed by MSUB (see aarch64_enc_mod).
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed: format string contained a stray '(' after the mnemonic;
  // also use "\n\t" for consistency with modI's two-line listing.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9046 
9047 // Integer Shifts
9048 
9049 // Shift Left Register
// 32-bit variable left shift: dst = src1 << (src2 & 0x1f) (LSLVW).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9064 
9065 // Shift Left Immediate
// 32-bit left shift by constant; the shift amount is masked to 0..31
// (& 0x1f), matching Java shift semantics for int.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9080 
9081 // Shift Right Logical Register
// 32-bit variable logical right shift: dst = src1 >>> src2 (LSRVW).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9096 
9097 // Shift Right Logical Immediate
// 32-bit logical right shift by constant; amount masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9112 
9113 // Shift Right Arithmetic Register
// 32-bit variable arithmetic right shift: dst = src1 >> src2 (ASRVW).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9128 
9129 // Shift Right Arithmetic Immediate
// 32-bit arithmetic right shift by constant; amount masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9144 
9145 // Combined Int Mask and Right Shift (using UBFM)
9146 // TODO
9147 
9148 // Long Shifts
9149 
9150 // Shift Left Register
// 64-bit variable left shift: dst = src1 << src2 (LSLV).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9165 
9166 // Shift Left Immediate
// 64-bit left shift by constant; the shift amount is masked to 0..63
// (& 0x3f), matching Java shift semantics for long.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9181 
9182 // Shift Right Logical Register
// 64-bit variable logical right shift: dst = src1 >>> src2 (LSRV).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9197 
9198 // Shift Right Logical Immediate
// 64-bit logical right shift by constant; amount masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9213 
9214 // A special-case pattern for card table stores.
// Logical right shift of a pointer's bits (CastP2X then URShiftL),
// matched so that card-table index computation needs no separate
// pointer-to-long move.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9229 
9230 // Shift Right Arithmetic Register
// 64-bit variable arithmetic right shift: dst = src1 >> src2 (ASRV).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9245 
9246 // Shift Right Arithmetic Immediate
// 64-bit arithmetic right shift by constant; amount masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9261 
9262 // BEGIN This section of the file is automatically generated. Do not edit --------------
9263 
// dst = ~src1 (64-bit bitwise NOT), matched from XorL(src1, -1) and
// emitted as EON with the zero register.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// dst = ~src1 (32-bit bitwise NOT), matched from XorI(src1, -1) and
// emitted as EONW with the zero register.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
9296 
// dst = src1 & ~src2 (32-bit), matched from AndI(src1, XorI(src2, -1))
// and emitted as a single BICW.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9313 
// dst = src1 & ~src2 (64-bit), matched from AndL(src1, XorL(src2, -1))
// and emitted as a single BIC.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9330 
// dst = src1 | ~src2 (32-bit), matched from OrI(src1, XorI(src2, -1))
// and emitted as a single ORNW.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9347 
// dst = src1 | ~src2 (64-bit), matched from OrL(src1, XorL(src2, -1))
// and emitted as a single ORN.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9364 
// dst = ~(src1 ^ src2) (32-bit), matched from XorI(-1, XorI(src2, src1))
// and emitted as a single EONW.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9381 
// dst = ~(src1 ^ src2) (64-bit), matched from XorL(-1, XorL(src2, src1))
// and emitted as a single EON.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9398 
// dst = src1 & ~(src2 >>> src3) (32-bit), folded into BICW with an
// LSR-shifted second operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // NOTE(review): shift masked with 0x3f although w-form shifts only
    // encode 0-31 (hand-written 32-bit patterns mask with 0x1f) --
    // confirm the matcher never supplies a shift >= 32 here.
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9416 
// dst = src1 & ~(src2 >>> src3) (64-bit), folded into BIC with an
// LSR-shifted second operand.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9434 
// dst = src1 & ~(src2 >> src3) (32-bit), folded into BICW with an
// ASR-shifted second operand.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9452 
// dst = src1 & ~(src2 >> src3) (64-bit), folded into BIC with an
// ASR-shifted second operand.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9470 
// dst = src1 & ~(src2 << src3) (32-bit), folded into BICW with an
// LSL-shifted second operand.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9488 
// dst = src1 & ~(src2 << src3) (64-bit), folded into BIC with an
// LSL-shifted second operand.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9506 
// dst = ~(src1 ^ (src2 >>> src3)) (32-bit), folded into EONW with an
// LSR-shifted second operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9524 
// dst = ~(src1 ^ (src2 >>> src3)) (64-bit), folded into EON with an
// LSR-shifted second operand.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9542 
// dst = ~(src1 ^ (src2 >> src3)) (32-bit), folded into EONW with an
// ASR-shifted second operand.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9560 
// dst = ~(src1 ^ (src2 >> src3)) (64-bit), folded into EON with an
// ASR-shifted second operand.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9578 
// dst = ~(src1 ^ (src2 << src3)) (32-bit), folded into EONW with an
// LSL-shifted second operand.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9596 
// dst = ~(src1 ^ (src2 << src3)) (64-bit), folded into EON with an
// LSL-shifted second operand.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9614 
// dst = src1 | ~(src2 >>> src3) (32-bit), folded into ORNW with an
// LSR-shifted second operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9632 
// dst = src1 | ~(src2 >>> src3) (64-bit), folded into ORN with an
// LSR-shifted second operand.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9650 
// dst = src1 | ~(src2 >> src3) (32-bit), folded into ORNW with an
// ASR-shifted second operand.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9668 
// dst = src1 | ~(src2 >> src3) (64-bit), folded into ORN with an
// ASR-shifted second operand.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9686 
// dst = src1 | ~(src2 << src3) (32-bit), folded into ORNW with an
// LSL-shifted second operand.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9704 
// dst = src1 | ~(src2 << src3) (64-bit), folded into ORN with an
// LSL-shifted second operand.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9722 
// dst = src1 & (src2 >>> src3) (32-bit), folded into ANDW with an
// LSR-shifted second operand.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9741 
// dst = src1 & (src2 >>> src3) (64-bit), folded into AND with an
// LSR-shifted second operand ("andr" is the assembler method name).
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9760 
// dst = src1 & (src2 >> src3) (32-bit), folded into ANDW with an
// ASR-shifted second operand.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    // NOTE(review): 0x3f mask on a w-form shift (0-31 range) -- see
    // AndI_reg_URShift_not_reg; confirm against the generator.
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9779 
// Fold a constant arithmetic right shift into the second source operand of
// a 64-bit AND ("andr Xd, Xn, Xm, ASR #imm"); shift legitimately 0..63.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9798 
// Fold a constant left shift into the second source operand of a 32-bit
// AND ("andw Wd, Wn, Wm, LSL #imm").
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9817 
// Fold a constant left shift into the second source operand of a 64-bit
// AND ("andr Xd, Xn, Xm, LSL #imm"); shift legitimately 0..63.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9836 
// Fold a constant unsigned right shift into the second source operand of
// a 32-bit XOR ("eorw Wd, Wn, Wm, LSR #imm").
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9855 
// Fold a constant unsigned right shift into the second source operand of
// a 64-bit XOR ("eor Xd, Xn, Xm, LSR #imm"); shift legitimately 0..63.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9874 
// Fold a constant arithmetic right shift into the second source operand of
// a 32-bit XOR ("eorw Wd, Wn, Wm, ASR #imm").
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9893 
// Fold a constant arithmetic right shift into the second source operand of
// a 64-bit XOR ("eor Xd, Xn, Xm, ASR #imm"); shift legitimately 0..63.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9912 
// Fold a constant left shift into the second source operand of a 32-bit
// XOR ("eorw Wd, Wn, Wm, LSL #imm").
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9931 
// Fold a constant left shift into the second source operand of a 64-bit
// XOR ("eor Xd, Xn, Xm, LSL #imm"); shift legitimately 0..63.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9950 
// Fold a constant unsigned right shift into the second source operand of
// a 32-bit OR ("orrw Wd, Wn, Wm, LSR #imm").
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9969 
// Fold a constant unsigned right shift into the second source operand of
// a 64-bit OR ("orr Xd, Xn, Xm, LSR #imm"); shift legitimately 0..63.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9988 
// Fold a constant arithmetic right shift into the second source operand of
// a 32-bit OR ("orrw Wd, Wn, Wm, ASR #imm").
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10007 
// Fold a constant arithmetic right shift into the second source operand of
// a 64-bit OR ("orr Xd, Xn, Xm, ASR #imm"); shift legitimately 0..63.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10026 
// Fold a constant left shift into the second source operand of a 32-bit
// OR ("orrw Wd, Wn, Wm, LSL #imm").
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10045 
// Fold a constant left shift into the second source operand of a 64-bit
// OR ("orr Xd, Xn, Xm, LSL #imm"); shift legitimately 0..63.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10064 
// Fold a constant unsigned right shift into the second source operand of
// a 32-bit ADD ("addw Wd, Wn, Wm, LSR #imm").
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10083 
// Fold a constant unsigned right shift into the second source operand of
// a 64-bit ADD ("add Xd, Xn, Xm, LSR #imm"); shift legitimately 0..63.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10102 
// Fold a constant arithmetic right shift into the second source operand of
// a 32-bit ADD ("addw Wd, Wn, Wm, ASR #imm").
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10121 
// Fold a constant arithmetic right shift into the second source operand of
// a 64-bit ADD ("add Xd, Xn, Xm, ASR #imm"); shift legitimately 0..63.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10140 
// Fold a constant left shift into the second source operand of a 32-bit
// ADD ("addw Wd, Wn, Wm, LSL #imm").
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10159 
// Fold a constant left shift into the second source operand of a 64-bit
// ADD ("add Xd, Xn, Xm, LSL #imm"); shift legitimately 0..63.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10178 
// Fold a constant unsigned right shift into the second source operand of
// a 32-bit SUB ("subw Wd, Wn, Wm, LSR #imm").
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10197 
// Fold a constant unsigned right shift into the second source operand of
// a 64-bit SUB ("sub Xd, Xn, Xm, LSR #imm"); shift legitimately 0..63.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10216 
// Fold a constant arithmetic right shift into the second source operand of
// a 32-bit SUB ("subw Wd, Wn, Wm, ASR #imm").
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10235 
// Fold a constant arithmetic right shift into the second source operand of
// a 64-bit SUB ("sub Xd, Xn, Xm, ASR #imm"); shift legitimately 0..63.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10254 
// Fold a constant left shift into the second source operand of a 32-bit
// SUB ("subw Wd, Wn, Wm, LSL #imm").
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              // 32-bit (sf == 0) shifted-register form: shift amounts
              // 32..63 are reserved, so mask to 0..31 (was 0x3f).
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10273 
// Fold a constant left shift into the second source operand of a 64-bit
// SUB ("sub Xd, Xn, Xm, LSL #imm"); shift legitimately 0..63.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10292 
10293 
10294 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 64-bit signed bit-field move: (src << lshift) >> rshift (arithmetic).
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // Encode the combined shifts as one SBFM:
    //   immr = (rshift - lshift) mod 64, imms = 63 - lshift.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit signed bit-field move: (src << lshift) >> rshift (arithmetic).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // immr = (rshift - lshift) mod 32, imms = 31 - lshift.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 64-bit unsigned bit-field move: (src << lshift) >>> rshift (logical).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // immr = (rshift - lshift) mod 64, imms = 63 - lshift.
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned bit-field move: (src << lshift) >>> rshift (logical).
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // immr = (rshift - lshift) mod 32, imms = 31 - lshift.
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// 32-bit unsigned bitfield extract: (src >>> rshift) & mask, where mask is
// a contiguous low-order bit mask (guaranteed by immI_bitmask).  The field
// width is log2(mask + 1).
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit unsigned bitfield extract: (src >>> rshift) & mask, mask being a
// contiguous low-order bit mask (guaranteed by immL_bitmask).
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field, which also performs
// the ConvI2L.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
10436 
// Rotations

// (src1 << lshift) | (src2 >>> rshift), with lshift + rshift == 64
// (enforced by the predicate), is an EXTR of the src1:src2 pair.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant: lshift + rshift == 32 (mod 32, enforced by predicate).
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same pattern with AddL instead of OrL: since the shifted pieces occupy
// disjoint bit ranges, add and or are equivalent here.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of the AddI form.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
10498 
10499 
// rol expander

// Rotate-left of a long by a variable amount, implemented as a
// rotate-right by the negated amount (rorv dst, src, -shift).
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rotate right by -shift == rotate left by shift.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander

// Rotate-left of an int by a variable amount (32-bit rorvw on the
// negated amount).
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rotate right by -shift == rotate left by shift.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
10531 
// Long rotate-left idiom: (src << shift) | (src >>> (64 - shift)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with 0 instead of 64: (0 - shift) == (64 - shift)
// modulo 64, which is all the shifter sees.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
10549 
// Int rotate-left idiom: (src << shift) | (src >>> (32 - shift)).
// This is an int pattern, so it must use int register operands and the
// 32-bit expander (rolI_rReg / rorvw); the previous iRegL operands and
// rolL_rReg expansion rotated across all 64 bits, giving wrong results.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
10558 
// Int rotate-left idiom with 0 instead of 32: (0 - shift) == (32 - shift)
// modulo 32.  Must use int operands and the 32-bit expander (rolI_rReg);
// the previous iRegL operands and rolL_rReg expansion performed a 64-bit
// rotate, giving wrong results for ints.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
10567 
// ror expander

// Rotate-right of a long by a variable amount (single rorv).
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// Rotate-right of an int by a variable amount (single 32-bit rorvw).
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
10597 
// Long rotate-right idiom: (src >>> shift) | (src << (64 - shift)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom written with 0 instead of 64: (0 - shift) == (64 - shift)
// modulo 64, which is all the shifter sees.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
10615 
// Int rotate-right idiom: (src >>> shift) | (src << (32 - shift)).
// This is an int pattern, so it must use int register operands and the
// 32-bit expander (rorI_rReg / rorvw); the previous iRegL operands and
// rorL_rReg expansion rotated across all 64 bits, giving wrong results.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
10624 
// Int rotate-right idiom with 0 instead of 32: (0 - shift) == (32 - shift)
// modulo 32.  Must use int operands and the 32-bit expander (rorI_rReg);
// the previous iRegL operands and rorL_rReg expansion performed a 64-bit
// rotate, giving wrong results for ints.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
10633 
// Add/subtract (extended)

// long + (long)int, fused as add with a sign-extending (sxtw)
// extended-register operand.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// long - (long)int, fused as sub with a sign-extending (sxtw)
// extended-register operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};


// src1 + ((src2 << 16) >> 16): the shift pair sign-extends a 16-bit value,
// so emit add with an sxth extended-register operand.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 + ((src2 << 24) >> 24): sign-extends an 8-bit value (sxtb).
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 + ((src2 << 24) >>> 24): the logical shift pair zero-extends an
// 8-bit value (uxtb).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 + ((src2 << 48) >> 48) on longs: sign-extends a 16-bit value (sxth).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 + ((src2 << 32) >> 32) on longs: sign-extends a 32-bit value (sxtw).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 + ((src2 << 56) >> 56) on longs: sign-extends an 8-bit value (sxtb).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src1 + ((src2 << 56) >>> 56) on longs: zero-extends an 8-bit value (uxtb).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
10753 
10754 
10755 instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
10756 %{
10757   match(Set dst (AddI src1 (AndI src2 mask)));
10758   ins_cost(INSN_COST);
10759   format %{ "addw  $dst, $src1, $src2, uxtb" %}
10760 
10761    ins_encode %{
10762      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
10763             as_Register($src2$$reg), ext::uxtb);
10764    %}
10765   ins_pipe(ialu_reg_reg);
10766 %}
10767 
10768 instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
10769 %{
10770   match(Set dst (AddI src1 (AndI src2 mask)));
10771   ins_cost(INSN_COST);
10772   format %{ "addw  $dst, $src1, $src2, uxth" %}
10773 
10774    ins_encode %{
10775      __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
10776             as_Register($src2$$reg), ext::uxth);
10777    %}
10778   ins_pipe(ialu_reg_reg);
10779 %}
10780 
10781 instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
10782 %{
10783   match(Set dst (AddL src1 (AndL src2 mask)));
10784   ins_cost(INSN_COST);
10785   format %{ "add  $dst, $src1, $src2, uxtb" %}
10786 
10787    ins_encode %{
10788      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
10789             as_Register($src2$$reg), ext::uxtb);
10790    %}
10791   ins_pipe(ialu_reg_reg);
10792 %}
10793 
10794 instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
10795 %{
10796   match(Set dst (AddL src1 (AndL src2 mask)));
10797   ins_cost(INSN_COST);
10798   format %{ "add  $dst, $src1, $src2, uxth" %}
10799 
10800    ins_encode %{
10801      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
10802             as_Register($src2$$reg), ext::uxth);
10803    %}
10804   ins_pipe(ialu_reg_reg);
10805 %}
10806 
10807 instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
10808 %{
10809   match(Set dst (AddL src1 (AndL src2 mask)));
10810   ins_cost(INSN_COST);
10811   format %{ "add  $dst, $src1, $src2, uxtw" %}
10812 
10813    ins_encode %{
10814      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
10815             as_Register($src2$$reg), ext::uxtw);
10816    %}
10817   ins_pipe(ialu_reg_reg);
10818 %}
10819 
10820 instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
10821 %{
10822   match(Set dst (SubI src1 (AndI src2 mask)));
10823   ins_cost(INSN_COST);
10824   format %{ "subw  $dst, $src1, $src2, uxtb" %}
10825 
10826    ins_encode %{
10827      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
10828             as_Register($src2$$reg), ext::uxtb);
10829    %}
10830   ins_pipe(ialu_reg_reg);
10831 %}
10832 
10833 instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
10834 %{
10835   match(Set dst (SubI src1 (AndI src2 mask)));
10836   ins_cost(INSN_COST);
10837   format %{ "subw  $dst, $src1, $src2, uxth" %}
10838 
10839    ins_encode %{
10840      __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
10841             as_Register($src2$$reg), ext::uxth);
10842    %}
10843   ins_pipe(ialu_reg_reg);
10844 %}
10845 
10846 instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
10847 %{
10848   match(Set dst (SubL src1 (AndL src2 mask)));
10849   ins_cost(INSN_COST);
10850   format %{ "sub  $dst, $src1, $src2, uxtb" %}
10851 
10852    ins_encode %{
10853      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
10854             as_Register($src2$$reg), ext::uxtb);
10855    %}
10856   ins_pipe(ialu_reg_reg);
10857 %}
10858 
10859 instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
10860 %{
10861   match(Set dst (SubL src1 (AndL src2 mask)));
10862   ins_cost(INSN_COST);
10863   format %{ "sub  $dst, $src1, $src2, uxth" %}
10864 
10865    ins_encode %{
10866      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
10867             as_Register($src2$$reg), ext::uxth);
10868    %}
10869   ins_pipe(ialu_reg_reg);
10870 %}
10871 
10872 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
10873 %{
10874   match(Set dst (SubL src1 (AndL src2 mask)));
10875   ins_cost(INSN_COST);
10876   format %{ "sub  $dst, $src1, $src2, uxtw" %}
10877 
10878    ins_encode %{
10879      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
10880             as_Register($src2$$reg), ext::uxtw);
10881    %}
10882   ins_pipe(ialu_reg_reg);
10883 %}
10884 
10885 // END This section of the file is automatically generated. Do not edit --------------
10886 
10887 // ============================================================================
10888 // Floating Point Arithmetic Instructions
10889 
// Scalar FP add/sub/mul. All operands live in the FP/SIMD register file
// (vRegF = single precision, vRegD = double precision); costs are scaled
// multiples of INSN_COST.

// single-precision add: fadds Sd, Sn, Sm
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double-precision add: faddd Dd, Dn, Dm
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// single-precision subtract: fsubs Sd, Sn, Sm
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double-precision subtract: fsubd Dd, Dn, Dm
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// single-precision multiply: fmuls Sd, Sn, Sm (costed higher than add/sub)
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double-precision multiply: fmuld Dd, Dn, Dm
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
10979 
// We cannot use these fused multiply with add/sub ops because they do
// not produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. They are left here in case we can identify cases where it is
// legitimate to use them.
10985 
10986 
10987 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
10988 //   match(Set dst (AddF (MulF src1 src2) src3));
10989 
10990 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
10991 
10992 //   ins_encode %{
10993 //     __ fmadds(as_FloatRegister($dst$$reg),
10994 //              as_FloatRegister($src1$$reg),
10995 //              as_FloatRegister($src2$$reg),
10996 //              as_FloatRegister($src3$$reg));
10997 //   %}
10998 
10999 //   ins_pipe(pipe_class_default);
11000 // %}
11001 
11002 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11003 //   match(Set dst (AddD (MulD src1 src2) src3));
11004 
11005 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
11006 
11007 //   ins_encode %{
11008 //     __ fmaddd(as_FloatRegister($dst$$reg),
11009 //              as_FloatRegister($src1$$reg),
11010 //              as_FloatRegister($src2$$reg),
11011 //              as_FloatRegister($src3$$reg));
11012 //   %}
11013 
11014 //   ins_pipe(pipe_class_default);
11015 // %}
11016 
11017 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11018 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
11019 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
11020 
11021 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
11022 
11023 //   ins_encode %{
11024 //     __ fmsubs(as_FloatRegister($dst$$reg),
11025 //               as_FloatRegister($src1$$reg),
11026 //               as_FloatRegister($src2$$reg),
11027 //              as_FloatRegister($src3$$reg));
11028 //   %}
11029 
11030 //   ins_pipe(pipe_class_default);
11031 // %}
11032 
11033 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11034 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
11035 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
11036 
11037 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
11038 
11039 //   ins_encode %{
11040 //     __ fmsubd(as_FloatRegister($dst$$reg),
11041 //               as_FloatRegister($src1$$reg),
11042 //               as_FloatRegister($src2$$reg),
11043 //               as_FloatRegister($src3$$reg));
11044 //   %}
11045 
11046 //   ins_pipe(pipe_class_default);
11047 // %}
11048 
11049 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11050 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
11051 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
11052 
11053 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
11054 
11055 //   ins_encode %{
11056 //     __ fnmadds(as_FloatRegister($dst$$reg),
11057 //                as_FloatRegister($src1$$reg),
11058 //                as_FloatRegister($src2$$reg),
11059 //                as_FloatRegister($src3$$reg));
11060 //   %}
11061 
11062 //   ins_pipe(pipe_class_default);
11063 // %}
11064 
11065 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11066 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
11067 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
11068 
11069 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
11070 
11071 //   ins_encode %{
11072 //     __ fnmaddd(as_FloatRegister($dst$$reg),
11073 //                as_FloatRegister($src1$$reg),
11074 //                as_FloatRegister($src2$$reg),
11075 //                as_FloatRegister($src3$$reg));
11076 //   %}
11077 
11078 //   ins_pipe(pipe_class_default);
11079 // %}
11080 
11081 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
11082 //   match(Set dst (SubF (MulF src1 src2) src3));
11083 
11084 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
11085 
11086 //   ins_encode %{
11087 //     __ fnmsubs(as_FloatRegister($dst$$reg),
11088 //                as_FloatRegister($src1$$reg),
11089 //                as_FloatRegister($src2$$reg),
11090 //                as_FloatRegister($src3$$reg));
11091 //   %}
11092 
11093 //   ins_pipe(pipe_class_default);
11094 // %}
11095 
11096 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
11097 //   match(Set dst (SubD (MulD src1 src2) src3));
11098 
11099 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
11100 
11101 //   ins_encode %{
11102 //   // n.b. insn name should be fnmsubd
11103 //     __ fnmsub(as_FloatRegister($dst$$reg),
11104 //                as_FloatRegister($src1$$reg),
11105 //                as_FloatRegister($src2$$reg),
11106 //                as_FloatRegister($src3$$reg));
11107 //   %}
11108 
11109 //   ins_pipe(pipe_class_default);
11110 // %}
11111 
11112 
// Scalar FP divide. Given much higher costs than the other FP ops
// (18x / 32x INSN_COST) to reflect divide latency.

// single-precision divide: fdivs Sd, Sn, Sm
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double-precision divide: fdivd Dd, Dn, Dm
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11142 
// single-precision negate: fnegs Sd, Sn.
// Fixed the format string: it read "fneg" but the emitted instruction is
// fnegs — every sibling instruct includes the precision suffix (fnegd,
// fabss, fabsd, ...).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11156 
// double-precision negate: fnegd Dd, Dn
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// single-precision absolute value: fabss Sd, Sn
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double-precision absolute value: fabsd Dd, Dn
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double-precision square root: fsqrtd Dd, Dn
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float sqrt reaches the matcher as ConvD2F(SqrtD(ConvF2D src)) — there is
// no SqrtF ideal node — and is implemented directly with a single fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11222 
11223 // ============================================================================
11224 // Logical Instructions
11225 
11226 // Integer Logical Instructions
11227 
11228 // And Instructions
11229 
11230 
// int bitwise AND, register-register: andw Wd, Wn, Wm.
// NOTE(review): the rFlagsReg cr operand is declared but not covered by an
// effect() clause and the non-flag-setting andw is emitted — presumably a
// leftover from a flag-setting variant; confirm whether it can be dropped.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
11245 
// int bitwise AND with a logical-immediate operand: andw Wd, Wn, #imm.
// Fixed the format string: it read "andsw" (the flag-setting form) but the
// encoding emits the plain andw, matching andI_reg_reg above.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11260 
11261 // Or Instructions
11262 
// int bitwise OR, register-register: orrw Wd, Wn, Wm
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int bitwise OR with a logical-immediate operand
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// int bitwise XOR, register-register: eorw Wd, Wn, Wm
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// int bitwise XOR with a logical-immediate operand
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11324 
11325 // Long Logical Instructions
11326 // TODO
11327 
// Long (64-bit) logical instructions. Fixed the format strings: all six
// read "\t# int" although these are the long forms — now "\t# long".
// Also normalized xorL_reg_imm's ins_cost/format clause order to match
// its five siblings (purely cosmetic; clause order is not significant).

// long bitwise AND, register-register: and Xd, Xn, Xm.
// NOTE(review): cr is declared but has no effect() clause and the
// non-flag-setting form is emitted — same pattern as andI_reg_reg; confirm.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise AND with a logical-immediate operand
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// long bitwise OR, register-register: orr Xd, Xn, Xm
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise OR with a logical-immediate operand
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// long bitwise XOR, register-register: eor Xd, Xn, Xm
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// long bitwise XOR with a logical-immediate operand
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11421 
// int -> long sign extension. sbfm with immr=0, imms=31 is the sxtw alias
// (shown as such in the format string).
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Unsigned int -> long: AndL(ConvI2L src, mask) with a 32-bit mask is a
// zero extension; ubfm 0,31 keeps the low 32 bits and clears the rest.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
11447 
// long -> int truncation: movw writes the low 32 bits and zeroes the rest.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// int -> boolean: dst = (src != 0) ? 1 : 0, via compare + cset.
// Clobbers the flags, hence effect(KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// pointer -> boolean: dst = (src != null) ? 1 : 0; 64-bit compare + cset.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
11496 
// FP precision and FP<->integer conversions. fcvtzs* converts FP to a
// signed integer (the 'z' in the mnemonic denotes round-toward-zero,
// matching Java's cast semantics); scvtf* converts a signed integer to FP.

// double -> float
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// float -> double
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// float -> int
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// float -> long
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// int -> float
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// long -> float
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double -> int
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// double -> long
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// int -> double
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// long -> double
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
11626 
11627 // stack <-> reg and reg <-> reg shuffles with no conversion
11628 
// Raw bit moves between the integer and FP register files routed through
// a stack slot; no value conversion is performed. Loads go from the stack
// slot at sp + disp into the destination register; stores go the other way.

// stack float slot -> int register (bit pattern load)
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// stack int slot -> float register (bit pattern load)
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// stack double slot -> long register (bit pattern load)
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// stack long slot -> double register (bit pattern load)
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// float register -> stack int slot (bit pattern store)
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// int register -> stack float slot (bit pattern store)
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
11736 
// double register -> stack long slot (bit pattern store).
// Fixed the format string: it read "strd $dst, $src" with the operands
// reversed relative to the emitted store and to every sibling
// ("strs $src, $dst", "strw $src, $dst", "str $src, $dst").
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
11754 
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  // Bitwise reinterpret a long GPR as a double by storing its raw
  // 64 bits to a double stack slot.
  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
11772 
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  // Register-to-register bit move: copy the raw 32 bits of a float FP
  // register into an int GPR via fmov (no memory round trip).
  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}
11790 
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  // Register-to-register bit move: copy the raw 32 bits of an int GPR
  // into a float FP register via fmov.
  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
11808 
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  // Register-to-register bit move: copy the raw 64 bits of a double FP
  // register into a long GPR via fmov.
  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}
11826 
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  // Register-to-register bit move: copy the raw 64 bits of a long GPR
  // into a double FP register via fmov.
  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
11844 
11845 // ============================================================================
11846 // clearing of an array
11847 
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  // Zero-fill an array: cnt words starting at base.  The encoding
  // clobbers both inputs, hence USE_KILL; fixed registers r10/r11 are
  // required by the shared clear-array stub.
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
11860 
11861 // ============================================================================
11862 // Overflow Math Instructions
11863 
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  // Int add overflow check: cmnw (adds discarding the result) sets V
  // on signed overflow, which the consuming branch/cmove tests.
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
11876 
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  // Int add-immediate overflow check; immIAddSub guarantees the
  // constant is encodable in the add/sub immediate field.
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
11889 
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  // Long add overflow check: 64-bit cmn sets V on signed overflow.
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
11902 
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  // Long add-immediate overflow check (add/sub-encodable constant).
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
11915 
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  // Int subtract overflow check: cmpw sets V on signed overflow.
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
11928 
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  // Int subtract-immediate overflow check.
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
11941 
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  // Long subtract overflow check: 64-bit cmp sets V on signed overflow.
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
11954 
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  // Long subtract-immediate overflow check.
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
11967 
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  // Int negate overflow check (0 - op1): overflows only for MIN_INT.
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
11980 
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  // Long negate overflow check (0 - op1): overflows only for MIN_LONG.
  // NOTE(review): the zero operand is declared immI0 while the node is
  // OverflowSubL (long) — presumably tolerated by the matcher here,
  // but confirm it should not be immL0.
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
11993 
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  // Int multiply overflow check.  smull produces the exact 64-bit
  // product; the product overflows 32 bits iff it differs from the
  // sign extension of its own low 32 bits.  The NE result is then
  // converted into the V flag (0x80000000 - 1 sets VS) so the
  // consuming node can test overflow/no_overflow uniformly.
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
12014 
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  // Fused int multiply-overflow + branch: when the If directly tests
  // overflow/no_overflow we can branch on NE/EQ from the sxtw compare
  // and skip the flag-materialization sequence of overflowMulI_reg.
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // VS (overflow wanted) maps to NE of the compare, VC to EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
12036 
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  // Long multiply overflow check.  mul gives bits 0..63 of the full
  // 128-bit product and smulh bits 64..127; the product overflows
  // 64 bits iff the high half differs from the sign extension of the
  // low half, i.e. from rscratch1 ASR #63.  The NE result is then
  // converted into the V flag (0x80000000 - 1 sets VS).
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    // Fixed: shift must be 63 (bit width - 1 of a 64-bit value), not 31,
    // to reproduce the sign extension of the low half.
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
12059 
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  // Fused long multiply-overflow + branch: when the If directly tests
  // overflow/no_overflow, branch on NE/EQ of the high-half compare and
  // skip the flag-materialization sequence of overflowMulL_reg.
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    // Fixed: shift must be 63, not 31, to reproduce the sign extension
    // of the 64-bit low half of the product.
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
12083 
12084 // ============================================================================
12085 // Compare Instructions
12086 
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  // Signed int compare, register-register.
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12100 
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  // Signed int compare against the constant zero.
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12114 
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  // Signed int compare against an add/sub-encodable immediate.
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12128 
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  // Signed int compare against an arbitrary immediate; costs more
  // because the constant may need to be materialized first.
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12142 
12143 // Unsigned compare Instructions; really, same as signed compare
12144 // except it should only be used to feed an If or a CMovI which takes a
12145 // cmpOpU.
12146 
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  // Unsigned int compare: same cmpw instruction as the signed form,
  // but defines rFlagsRegU so only unsigned cmpOpU consumers match.
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12160 
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  // Unsigned int compare against the constant zero.
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12174 
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  // Unsigned int compare against an add/sub-encodable immediate.
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12188 
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  // Unsigned int compare against an arbitrary immediate (may need a
  // constant materialization, hence the doubled cost).
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12202 
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  // Signed long compare, register-register.
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12216 
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  // Signed long compare against the constant zero.
  // NOTE(review): the format shows "tst" but the encoder emits an
  // add/sub-style compare against #0; also the zero operand is immI0
  // on a CmpL node — looks like it should be immL0; confirm against
  // the matcher.
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}
12230 
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  // Signed long compare against an add/sub-encodable immediate.
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12244 
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  // Signed long compare against an arbitrary immediate (may need a
  // constant materialization, hence the doubled cost).
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12258 
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  // Pointer compare: pointers compare unsigned, hence rFlagsRegU.
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12272 
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  // Compressed-pointer (narrow oop) compare, unsigned flags.
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}
12286 
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  // Pointer null test (compare against the null constant).
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}
12300 
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  // Compressed-pointer null test.
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
12314 
12315 // FP comparisons
12316 //
12317 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
12318 // using normal cmpOp. See declaration of rFlagsReg for details.
12319 
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  // Float compare setting the normal integer flags (see the rFlagsReg
  // comment referenced in the header above this section).
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12333 
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  // Float compare against literal 0.0 using the fcmp-with-zero form.
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
12347 // FROM HERE
12348 
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  // Double compare setting the normal integer flags.
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
12362 
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  // Double compare against literal 0.0 using the fcmp-with-zero form.
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
  %}

  ins_pipe(pipe_class_compare);
%}
12376 
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  // Three-way float compare: dst = -1 if src1 < src2 or unordered,
  // 0 if equal, +1 if greater.
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parentheses in the disassembly format.
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (an unused "done" Label that was bound but never branched to has
    // been removed)
  %}

  ins_pipe(pipe_class_default);

%}
12404 
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  // Three-way double compare: dst = -1 if src1 < src2 or unordered,
  // 0 if equal, +1 if greater.
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parentheses in the disassembly format.
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (an unused "done" Label that was bound but never branched to has
    // been removed)
  %}
  ins_pipe(pipe_class_default);

%}
12431 
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  // Three-way float compare against 0.0: dst = -1 if src1 < 0.0 or
  // unordered, 0 if equal, +1 if greater.
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parentheses in the disassembly format.
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (an unused "done" Label that was bound but never branched to has
    // been removed)
  %}

  ins_pipe(pipe_class_default);

%}
12458 
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  // Three-way double compare against 0.0: dst = -1 if src1 < 0.0 or
  // unordered, 0 if equal, +1 if greater.
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Fixed unbalanced parentheses in the disassembly format.
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0D);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (an unused "done" Label that was bound but never branched to has
    // been removed)
  %}
  ins_pipe(pipe_class_default);

%}
12484 
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  // dst = (p < q) ? -1 : 0, signed: csetw yields 0/1 for LT, and the
  // negate turns 1 into the all-ones mask.
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12505 
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  // dst = (src < 0) ? -1 : 0: an arithmetic shift right by 31
  // replicates the sign bit across the word, so no compare is needed.
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
12521 
12522 // ============================================================================
12523 // Max and Min
12524 
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  // Signed int minimum via compare + conditional select (branchless).
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // Select src1 when src1 < src2, else src2.
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
12549 // FROM HERE
12550 
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  // Signed int maximum via compare + conditional select (branchless).
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // Select src1 when src1 > src2, else src2.
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
12575 
12576 // ============================================================================
12577 // Branch Instructions
12578 
12579 // Direct Branch.
instruct branch(label lbl)
%{
  // Unconditional direct branch (Goto).
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
12593 
12594 // Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Conditional near branch on signed flags.
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
12614 
12615 // Conditional Near Branch Unsigned
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Conditional near branch on unsigned flags.
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
12635 
12636 // Make use of CBZ and CBNZ.  These instructions, as well as being
12637 // shorter than (cmp; branch), have the additional benefit of not
12638 // killing the flags.
12639 
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  // Fused int compare-with-zero + eq/ne branch using cbzw/cbnzw,
  // which is shorter than cmp+branch and does not write the flags.
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12658 
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  // Fused long compare-with-zero + eq/ne branch using cbz/cbnz
  // (flags untouched).
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12677 
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  // Fused pointer null-check + eq/ne branch using cbz/cbnz
  // (flags untouched).
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
12696 
12697 // Conditional Far Branch
12698 // Conditional Far Branch Unsigned
12699 // TODO: fixme
12700 
12701 // counted loop end branch near
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Counted-loop back branch (signed condition), near form.
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
12717 
12718 // counted loop end branch near Unsigned
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Counted-loop back branch (unsigned condition), near form.
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
12734 
12735 // counted loop end branch far
12736 // counted loop end branch far unsigned
12737 // TODO: fixme
12738 
12739 // ============================================================================
12740 // inlined locking and unlocking
12741 
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  // Inlined monitor enter (fast path); result is delivered in the
  // flags for the following branch, tmp/tmp2 are scratch.
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
12756 
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  // Inlined monitor exit (fast path); tmp/tmp2 are scratch.
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
12769 
12770 
12771 // ============================================================================
12772 // Safepoint Instructions
12773 
12774 // TODO
12775 // provide a near and far version of this code
12776 
instruct safePoint(iRegP poll)
%{
  // Safepoint poll: a load from the polling page which the VM
  // protects to trap threads at a safepoint.
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
12789 
12790 
12791 // ============================================================================
12792 // Procedure Call/Return Instructions
12793 
12794 // Call Java Static Instruction
12795 
instruct CallStaticJavaDirect(method meth)
%{
  // Direct call to a statically-bound Java method.
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
12811 
12812 // TO HERE
12813 
12814 // Call Java Dynamic Instruction
instruct CallDynamicJavaDirect(method meth)
%{
  // Call to a dynamically-dispatched Java method (inline-cache call).
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
12830 
12831 // Call Runtime Instruction
12832 
instruct CallRuntimeDirect(method meth)
%{
  // Call from compiled Java code into a VM runtime entry point.
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
12847 
12848 // Call Runtime Instruction
12849 
12850 instruct CallLeafDirect(method meth)
12851 %{
12852   match(CallLeaf);
12853 
12854   effect(USE meth);
12855 
12856   ins_cost(CALL_COST);
12857 
12858   format %{ "CALL, runtime leaf $meth" %}
12859 
12860   ins_encode( aarch64_enc_java_to_runtime(meth) );
12861 
12862   ins_pipe(pipe_class_call);
12863 %}
12864 
12865 // Call Runtime Instruction
12866 
12867 instruct CallLeafNoFPDirect(method meth)
12868 %{
12869   match(CallLeafNoFP);
12870 
12871   effect(USE meth);
12872 
12873   ins_cost(CALL_COST);
12874 
12875   format %{ "CALL, runtime leaf nofp $meth" %}
12876 
12877   ins_encode( aarch64_enc_java_to_runtime(meth) );
12878 
12879   ins_pipe(pipe_class_call);
12880 %}
12881 
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  // method_oop is not referenced by the encoding; binding it to the
  // inline-cache register via the operand type keeps it live across
  // the jump for the callee.
  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
12898 
// Tail jump used when rethrowing/forwarding an exception: the exception
// oop is pinned in r0 (iRegP_R0) and we branch to the handler.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
12911 
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size: this rule only informs the register allocator that the
  // exception oop materializes in r0.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
12929 
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
12942 
12943 
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
12956 
// Die now.
// Matches the Halt ideal node; emits a breakpoint trap so execution
// cannot silently fall through.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
12972 
12973 // ============================================================================
// Partial Subtype Check
//
// Slow-path subtype check: search the secondary-supers (superklass)
// array for an instance of the superklass.  Set a hidden
// internal cache on a hit (cache is checked with exposed code in
// gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
// encoding ALSO sets flags.

instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  // opcode is used as a flag by the encoding, not as a real opcode.
  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
12995 
// Partial subtype check fused with a compare-against-zero: only the
// flags are consumed, so the result register is just a clobbered temp
// here (KILL result).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same cost as the register-result version above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
13010 
// Lexicographic comparison of two strings; result (in r0) is the
// comparison value produced by MacroAssembler::string_compare.  All
// string/count inputs are consumed (USE_KILL); tmp1 and flags clobbered.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13025 
// StrIndexOf with a variable substring length.  The trailing -1 passed
// to string_indexof signals "count is in a register" — contrast the
// _con variant below, which passes the constant count instead.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// StrIndexOf specialized for a small constant substring length
// (immI_le_4, i.e. at most 4); the constant is forwarded to the stub
// and zr replaces the count register.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13063 
// String equality test over cnt characters; result in r0.  Inputs are
// consumed (USE_KILL), tmp and flags clobbered.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13078 
// char[] array equality (AryEq); result in r0, computed by
// MacroAssembler::char_arrays_equals.  Both array pointers are
// consumed (USE_KILL); tmp and flags are clobbered.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  // Fix: the second operand was written as bare "ary2" (missing '$'),
  // so -XX:+PrintOptoAssembly printed the literal text instead of the
  // operand.  Debug-output only; no change to generated code.
  format %{ "Array Equals $ary1,$ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13092 
// encode char[] to byte[] in ISO_8859_1
// src/dst/len are consumed (USE_KILL); four vector temporaries
// (v0-v3) and the flags are clobbered.  result is in r0.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
13111 
13112 // ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
// Zero-size: the thread pointer already lives in the dedicated thread
// register (thread_RegP), so no code needs to be emitted.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
13130 
// ====================VECTOR INSTRUCTIONS=====================================

// Vector loads/stores: the rule is selected by the vector's memory
// size in bytes (4, 8 or 16), mapping to ldrs/ldrd/ldrq (and the
// matching str forms).

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(pipe_class_memory);
%}
13198 
// Replicate (splat) rules: broadcast a scalar (register or immediate)
// into every lane of a 64-bit (vecD) or 128-bit (vecX) vector.
// Register sources use DUP; immediates use the assembler's vector mov.

// Splat a GP-register byte into 8 lanes (also covers 4-byte vectors).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat a GP-register byte into 16 lanes.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an immediate byte (masked to 8 bits) into 8 lanes.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an immediate byte (masked to 8 bits) into 16 lanes.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}

// Splat a GP-register short into 4 lanes (also covers 2-short vectors).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat a GP-register short into 8 lanes.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an immediate short (masked to 16 bits) into 4 lanes.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an immediate short (masked to 16 bits) into 8 lanes.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}

// Splat a GP-register int into 2 lanes.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat a GP-register int into 4 lanes.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an immediate int into 2 lanes.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an immediate int into 4 lanes.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Splat a GP-register long into 2 lanes.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Zero a 128-bit vector via eor(dst, dst, dst): self-XOR clears all
// 128 bits regardless of lane size, so the lane label is irrelevant.
// NOTE(review): the format string says "(4I)" although the rule is
// named 2L and matches a length-2 ReplicateI of zero — presumably the
// two zero patterns are interchangeable; confirm against the matcher.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an FP-register float into 2 lanes.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an FP-register float into 4 lanes.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Splat an FP-register double into 2 lanes.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13411 
// ====================REDUCTION ARITHMETIC====================================

// Integer add-reduction over a 2-lane vector:
// dst = src1 + src2[0] + src2[1], with lanes extracted via umov.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Integer add-reduction over a 4-lane vector: a single ADDV sums all
// lanes, then the scalar lane is moved out and added to src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Integer multiply-reduction over a 2-lane vector:
// dst = src1 * src2[0] * src2[1].
// NOTE(review): format string carries a stray trailing "\n\t".
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Integer multiply-reduction over a 4-lane vector: the upper 64 bits
// are moved down (ins D[0] <- D[1]), multiplied pairwise against the
// lower half, then the two remaining lanes are folded in via umov/mul.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
13494 
// Float add-reduction over a 2-lane vector: dst = src1 + src2[0] +
// src2[1].  Lane 1 is brought to lane 0 of tmp via ins, then added.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13514 
// Float add-reduction over a 4-lane vector: strictly-ordered scalar
// fadds chain (lane by lane) rather than a tree, preserving Java's
// sequential FP addition order.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13546 
// Float multiply-reduction over a 2-lane vector:
// dst = src1 * src2[0] * src2[1].
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fix: the trailing format label said "add reduction4f" — copy/paste
  // from the add rules.  This is a 2-float multiply reduction.
  // Debug-output only; no change to generated code.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13566 
// Float multiply-reduction over a 4-lane vector: sequential scalar
// fmuls chain over the four lanes, folding in src1.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fix: the trailing format label said "add reduction4f"; this rule
  // is a multiply reduction.  Debug-output only; no code change.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13598 
// Double add-reduction over a 2-lane vector: dst = src1 + src2[0] +
// src2[1]; lane 1 is moved to lane 0 of tmp via ins, then added.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13618 
// Double multiply-reduction over a 2-lane vector:
// dst = src1 * src2[0] * src2[1].
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fix: the trailing format label said "add reduction2d"; this rule
  // is a multiply reduction.  Debug-output only; no code change.
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13638 
13639 // ====================VECTOR ARITHMETIC=======================================
13640 
13641 // --------------------------------- ADD --------------------------------------
13642 
13643 instruct vadd8B(vecD dst, vecD src1, vecD src2)
13644 %{
13645   predicate(n->as_Vector()->length() == 4 ||
13646             n->as_Vector()->length() == 8);
13647   match(Set dst (AddVB src1 src2));
13648   ins_cost(INSN_COST);
13649   format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
13650   ins_encode %{
13651     __ addv(as_FloatRegister($dst$$reg), __ T8B,
13652             as_FloatRegister($src1$$reg),
13653             as_FloatRegister($src2$$reg));
13654   %}
13655   ins_pipe(pipe_class_default);
13656 %}
13657 
13658 instruct vadd16B(vecX dst, vecX src1, vecX src2)
13659 %{
13660   predicate(n->as_Vector()->length() == 16);
13661   match(Set dst (AddVB src1 src2));
13662   ins_cost(INSN_COST);
13663   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
13664   ins_encode %{
13665     __ addv(as_FloatRegister($dst$$reg), __ T16B,
13666             as_FloatRegister($src1$$reg),
13667             as_FloatRegister($src2$$reg));
13668   %}
13669   ins_pipe(pipe_class_default);
13670 %}
13671 
13672 instruct vadd4S(vecD dst, vecD src1, vecD src2)
13673 %{
13674   predicate(n->as_Vector()->length() == 2 ||
13675             n->as_Vector()->length() == 4);
13676   match(Set dst (AddVS src1 src2));
13677   ins_cost(INSN_COST);
13678   format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
13679   ins_encode %{
13680     __ addv(as_FloatRegister($dst$$reg), __ T4H,
13681             as_FloatRegister($src1$$reg),
13682             as_FloatRegister($src2$$reg));
13683   %}
13684   ins_pipe(pipe_class_default);
13685 %}
13686 
13687 instruct vadd8S(vecX dst, vecX src1, vecX src2)
13688 %{
13689   predicate(n->as_Vector()->length() == 8);
13690   match(Set dst (AddVS src1 src2));
13691   ins_cost(INSN_COST);
13692   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
13693   ins_encode %{
13694     __ addv(as_FloatRegister($dst$$reg), __ T8H,
13695             as_FloatRegister($src1$$reg),
13696             as_FloatRegister($src2$$reg));
13697   %}
13698   ins_pipe(pipe_class_default);
13699 %}
13700 
// Vector integer add, 2 x 32-bit int lanes in a 64-bit D register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13714 
// Vector integer add, 4 x 32-bit int lanes in a 128-bit Q register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13728 
// Vector integer add, 2 x 64-bit long lanes in a 128-bit Q register (T2D arrangement).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13742 
// Vector float add, 2 x 32-bit float lanes in a 64-bit D register (FADD).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13756 
// Vector float add, 4 x 32-bit float lanes in a 128-bit Q register (FADD).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13770 
// Vector double add, 2 x 64-bit double lanes in a 128-bit Q register (FADD).
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Guard on vector length: every sibling 2D rule (vsub2D, vmul2D, vdiv2D)
  // carries this predicate; it was missing here.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13783 
13784 // --------------------------------- SUB --------------------------------------
13785 
// Vector integer subtract, 8-bit byte lanes in a 64-bit D register.
// Handles both 4- and 8-element byte vectors (both fit in a D register).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13800 
// Vector integer subtract, 16 x 8-bit lanes in a 128-bit Q register.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13814 
// Vector integer subtract, 16-bit short lanes in a 64-bit D register
// (covers both 2- and 4-element short vectors).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13829 
// Vector integer subtract, 8 x 16-bit short lanes in a 128-bit Q register.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13843 
// Vector integer subtract, 2 x 32-bit int lanes in a 64-bit D register.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13857 
// Vector integer subtract, 4 x 32-bit int lanes in a 128-bit Q register.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13871 
// Vector integer subtract, 2 x 64-bit long lanes in a 128-bit Q register.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13885 
// Vector float subtract, 2 x 32-bit float lanes in a 64-bit D register (FSUB).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  // BUG FIX: this rule previously matched AddVF while emitting fsub, which
  // both duplicated vadd2F's match and miscompiled 2-float vector subtraction.
  // It must match the SubVF ideal node.
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13899 
// Vector float subtract, 4 x 32-bit float lanes in a 128-bit Q register (FSUB).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13913 
// Vector double subtract, 2 x 64-bit double lanes in a 128-bit Q register (FSUB).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13927 
13928 // --------------------------------- MUL --------------------------------------
13929 
// Vector integer multiply, 16-bit short lanes in a 64-bit D register
// (covers both 2- and 4-element short vectors).
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13944 
// Vector integer multiply, 8 x 16-bit short lanes in a 128-bit Q register.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13958 
// Vector integer multiply, 2 x 32-bit int lanes in a 64-bit D register.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13972 
// Vector integer multiply, 4 x 32-bit int lanes in a 128-bit Q register.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
13986 
// Vector float multiply, 2 x 32-bit float lanes in a 64-bit D register (FMUL).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14000 
// Vector float multiply, 4 x 32-bit float lanes in a 128-bit Q register (FMUL).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14014 
// Vector double multiply, 2 x 64-bit double lanes in a 128-bit Q register (FMUL).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14028 
14029 // --------------------------------- DIV --------------------------------------
14030 
// Vector float divide, 2 x 32-bit float lanes in a 64-bit D register (FDIV).
// Note: only FP vectors have a divide rule; integer vector divide is not supported here.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14044 
// Vector float divide, 4 x 32-bit float lanes in a 128-bit Q register (FDIV).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14058 
// Vector double divide, 2 x 64-bit double lanes in a 128-bit Q register (FDIV).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14072 
14073 // --------------------------------- AND --------------------------------------
14074 
// Vector bitwise AND in a 64-bit D register. Element type is irrelevant for
// bitwise ops, so the predicate keys on total byte length (4 or 8 bytes).
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    // andr is the assembler name for the vector AND instruction.
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14089 
// Vector bitwise AND of a full 128-bit Q register (16 bytes, any element type).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14103 
14104 // --------------------------------- OR ---------------------------------------
14105 
// Vector bitwise OR in a 64-bit D register (4- or 8-byte vectors, any element type).
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fixed format text: this rule emits orr; it previously printed "and"
  // (copy-paste from vand8B), producing misleading disassembly comments.
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14120 
// Vector bitwise OR of a full 128-bit Q register (16 bytes, any element type).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14134 
14135 // --------------------------------- XOR --------------------------------------
14136 
// Vector bitwise XOR in a 64-bit D register (4- or 8-byte vectors, any element type).
// Emitted as the AArch64 EOR instruction.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14151 
// Vector bitwise XOR of a full 128-bit Q register (AArch64 EOR).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14165 
14166 // ------------------------------ Shift ---------------------------------------
14167 
// Materialize a left-shift count vector: duplicate the scalar count register
// into every byte lane of a Q register for use by the variable-shift rules.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14176 
14177 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Materialize a right-shift count vector: broadcast the scalar count into all
// byte lanes, then negate each lane so SSHL/USHL perform a right shift.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14187 
// Variable shift of byte lanes in a 64-bit D register. SSHL covers both left
// shift and arithmetic right shift: vshiftcntR negates the count, and SSHL
// with a negative per-lane count performs a signed right shift.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14202 
// Variable shift of 16 byte lanes in a Q register; SSHL handles both left and
// arithmetic right shifts (right shifts use a negated count, see vshiftcntR).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14216 
// Variable logical (unsigned) right shift of byte lanes in a D register.
// USHL with the negated count (from vshiftcntR) performs the right shift.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14230 
// Variable logical (unsigned) right shift of 16 byte lanes in a Q register.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14243 
// Immediate left shift of byte lanes in a D register. The count is masked to
// 5 bits (Java int-shift semantics); shifting a byte by >= 8 yields zero,
// which is materialized as a self-EOR since SHL cannot encode that amount.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Shift >= element width: result is all zeros (dst = src ^ src).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14263 
// Immediate left shift of 16 byte lanes in a Q register; a count >= 8 zeroes
// the vector via self-EOR (SHL cannot encode shifts of the full element width).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14282 
// Immediate arithmetic right shift of byte lanes in a D register. A count >= 8
// is clamped to 7 (shifting by the element width fills with the sign bit, same
// as shifting by 7). NOTE(review): the `-sh & 7` transform appears to pre-encode
// the SSHR immediate for the assembler — confirm against assembler_aarch64's
// sshr() immediate convention.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
14298 
// Immediate arithmetic right shift of 16 byte lanes in a Q register; count is
// clamped to 7 (sign-fill behavior) and negated/masked for the SSHR encoding
// (see NOTE on vsra8B_imm).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
14313 
// Immediate logical right shift of byte lanes in a D register. A count >= 8
// zeroes the vector (self-EOR); otherwise USHR is emitted with the count
// negated and masked — presumably the assembler's immediate encoding; see
// NOTE on vsra8B_imm.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Logical shift >= element width: result is all zeros.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14333 
// Immediate logical right shift of 16 byte lanes in a Q register; count >= 8
// zeroes the vector, otherwise USHR with negated/masked count (see vsrl8B_imm).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14352 
// Variable shift of short lanes in a D register (2 or 4 elements). SSHL covers
// both left and arithmetic right shifts (right shifts use the negated count).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14367 
// Variable shift of 8 short lanes in a Q register (left and arithmetic right).
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14381 
// Variable logical right shift of short lanes in a D register (2 or 4 elements).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14395 
// Variable logical right shift of 8 short lanes in a Q register.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14408 
// Immediate left shift of short lanes in a D register; a count >= 16 zeroes
// the vector via self-EOR (SHL cannot encode the full element width).
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14428 
// Immediate left shift of 8 short lanes in a Q register; count >= 16 zeroes
// the vector via self-EOR.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14447 
// Immediate arithmetic right shift of short lanes in a D register. Counts >= 16
// are clamped to 15 (sign-fill behavior); `-sh & 15` pre-encodes the SSHR
// immediate (see NOTE on vsra8B_imm).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
14463 
// Immediate arithmetic right shift of 8 short lanes in a Q register; count is
// clamped to 15 then negated/masked for the SSHR encoding (see vsra4S_imm).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
14478 
// Immediate logical right shift of short lanes in a D register; count >= 16
// zeroes the vector, otherwise USHR with negated/masked count (see vsrl8B_imm).
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14498 
// Immediate logical right shift of 8 short lanes in a Q register; count >= 16
// zeroes the vector, otherwise USHR with negated/masked count.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
14517 
// Variable shift of 2 int lanes in a D register. SSHL covers both left shift
// and arithmetic right shift (right shifts use the negated count, vshiftcntR).
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14531 
// Variable shift of 4 int lanes in a Q register (left and arithmetic right).
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14545 
// Variable logical right shift of 2 int lanes in a D register (USHL with
// negated count).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14558 
// Variable logical right shift of 4 int lanes in a Q register.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14571 
// Immediate left shift of 2 int lanes in a D register. The count is masked to
// 5 bits, matching Java int-shift semantics; no zeroing branch is needed since
// all masked counts (0-31) are valid for 32-bit lanes.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14584 
// Vector left shift of 4 x 32-bit ints (128-bit vecX) by an immediate count;
// 4S counterpart of vsll2I_imm.  Count is masked to 5 bits, matching Java's
// int shift-distance masking.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14597 
// Vector arithmetic right shift of 2 x 32-bit ints (vecD) by an immediate.
// The amount is passed as -s & 31 (i.e. (32 - s) mod 32); presumably the
// assembler's sshr helper expects the count in this negated-and-masked form,
// or the matcher hands us an already-negated constant.
// NOTE(review): confirm against the sshr definition in assembler_aarch64.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14610 
// Vector arithmetic right shift of 4 x 32-bit ints (vecX) by an immediate;
// 4S counterpart of vsra2I_imm.  Same -s & 31 count form — see the note on
// vsra2I_imm regarding the expected encoding.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14623 
// Vector logical right shift of 2 x 32-bit ints (vecD) by an immediate.
// Uses the same -s & 31 count form as the sshr rules — NOTE(review): verify
// the ushr helper's expected encoding in assembler_aarch64.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14636 
// Vector logical right shift of 4 x 32-bit ints (vecX) by an immediate;
// 4S counterpart of vsrl2I_imm, same -s & 31 count form.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
14649 
// Vector shift of 2 x 64-bit longs (128-bit vecX) by a per-lane count
// vector.  As with vsll4I, both LShiftVL and RShiftVL map onto SSHL, which
// shifts right for negative counts — presumably the count vector is negated
// for the RShiftVL case.  NOTE(review): confirm against the shift-count
// rules.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14663 
// Vector logical right shift of 2 x 64-bit longs (vecX) by a per-lane count
// vector.  Uses USHL; presumes negated counts, as in the int variants.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14676 
// Vector left shift of 2 x 64-bit longs (vecX) by an immediate count.
// The count is masked to 6 bits (& 63), mirroring Java's masking of long
// shift distances to the lane width.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
14689 
// Vector arithmetic right shift of 2 x 64-bit longs (vecX) by an immediate.
// Count passed as -s & 63 — same negated-and-masked form as the int sshr
// rules; see the note on vsra2I_imm.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
14702 
// Vector logical right shift of 2 x 64-bit longs (vecX) by an immediate.
// Count passed as -s & 63, matching the other immediate right-shift rules.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
14715 
14716 //----------PEEPHOLE RULES-----------------------------------------------------
14717 // These must follow all instruction definitions as they use the names
14718 // defined in the instructions definitions.
14719 //
14720 // peepmatch ( root_instr_name [preceding_instruction]* );
14721 //
14722 // peepconstraint %{
14723 // (instruction_number.operand_name relational_op instruction_number.operand_name
14724 //  [, ...] );
14725 // // instruction numbers are zero-based using left to right order in peepmatch
14726 //
14727 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
14728 // // provide an instruction_number.operand_name for each operand that appears
14729 // // in the replacement instruction's match rule
14730 //
14731 // ---------VM FLAGS---------------------------------------------------------
14732 //
14733 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14734 //
14735 // Each peephole rule is given an identifying number starting with zero and
14736 // increasing by one in the order seen by the parser.  An individual peephole
14737 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14738 // on the command-line.
14739 //
14740 // ---------CURRENT LIMITATIONS----------------------------------------------
14741 //
14742 // Only match adjacent instructions in same basic block
14743 // Only equality constraints
14744 // Only constraints between operands, not (0.dest_reg == RAX_enc)
14745 // Only one replacement instruction
14746 //
14747 // ---------EXAMPLE----------------------------------------------------------
14748 //
14749 // // pertinent parts of existing instructions in architecture description
14750 // instruct movI(iRegINoSp dst, iRegI src)
14751 // %{
14752 //   match(Set dst (CopyI src));
14753 // %}
14754 //
14755 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
14756 // %{
14757 //   match(Set dst (AddI dst src));
14758 //   effect(KILL cr);
14759 // %}
14760 //
14761 // // Change (inc mov) to lea
14762 // peephole %{
//   // increment preceded by register-register move
14764 //   peepmatch ( incI_iReg movI );
14765 //   // require that the destination register of the increment
14766 //   // match the destination register of the move
14767 //   peepconstraint ( 0.dst == 1.dst );
14768 //   // construct a replacement instruction that sets
14769 //   // the destination to ( move's source register + one )
14770 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
14771 // %}
14772 //
14773 
14774 // Implementation no longer uses movX instructions since
14775 // machine-independent system no longer uses CopyX nodes.
14776 //
14777 // peephole
14778 // %{
14779 //   peepmatch (incI_iReg movI);
14780 //   peepconstraint (0.dst == 1.dst);
14781 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
14782 // %}
14783 
14784 // peephole
14785 // %{
14786 //   peepmatch (decI_iReg movI);
14787 //   peepconstraint (0.dst == 1.dst);
14788 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
14789 // %}
14790 
14791 // peephole
14792 // %{
14793 //   peepmatch (addI_iReg_imm movI);
14794 //   peepconstraint (0.dst == 1.dst);
14795 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
14796 // %}
14797 
14798 // peephole
14799 // %{
14800 //   peepmatch (incL_iReg movL);
14801 //   peepconstraint (0.dst == 1.dst);
14802 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
14803 // %}
14804 
14805 // peephole
14806 // %{
14807 //   peepmatch (decL_iReg movL);
14808 //   peepconstraint (0.dst == 1.dst);
14809 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
14810 // %}
14811 
14812 // peephole
14813 // %{
14814 //   peepmatch (addL_iReg_imm movL);
14815 //   peepconstraint (0.dst == 1.dst);
14816 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
14817 // %}
14818 
14819 // peephole
14820 // %{
14821 //   peepmatch (addP_iReg_imm movP);
14822 //   peepconstraint (0.dst == 1.dst);
14823 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
14824 // %}
14825 
14826 // // Change load of spilled value to only a spill
14827 // instruct storeI(memory mem, iRegI src)
14828 // %{
14829 //   match(Set mem (StoreI mem src));
14830 // %}
14831 //
14832 // instruct loadI(iRegINoSp dst, memory mem)
14833 // %{
14834 //   match(Set dst (LoadI mem));
14835 // %}
14836 //
14837 
14838 //----------SMARTSPILL RULES---------------------------------------------------
14839 // These must follow all instruction definitions as they use the names
14840 // defined in the instructions definitions.
14841 
14842 // Local Variables:
14843 // mode: c++
14844 // End: