1 //
   2 // Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// As regards Java usage, we don't use any callee-save registers
// because this makes it difficult to de-optimise a frame (see the
// comment in the x86 implementation of
// Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// r0-r18 are caller-saved (SOC) for both Java code and the C calling
// convention.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26 are callee-saved under the C convention (SOE in the second
// column) but are treated as caller-saved (SOC) for Java code.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31 are reserved (NS for Java) and never handed out by the
// allocator.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each FP/SIMD register is described as four 32-bit slots: Vn is the
  // low word and Vn_H, Vn_J, Vn_K (via next(), next(2), next(3)) are
  // the successively higher 32-bit words of the 128-bit register.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// the AArch64 condition flags (NZCV) are not directly accessible as an
// instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation order: plain volatiles first, then the
// argument registers, then the callee-saved registers, with the
// non-allocatable system registers listed last.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// FP register allocation order: the no-save registers v16-v31 first,
// then the argument registers v0-v7, with v8-v15 last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);

// The flags pseudo-register sits in its own chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
 432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
 433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
 434 // 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
 435 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
 /* R31 deliberately omitted: sp is never an integer register */
);

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP/R31)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// As above but additionally allowing R29, for use when the frame
// pointer does not need to be preserved.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);

// Pick the variant at runtime depending on -XX:+PreserveFramePointer.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// As above but additionally allowing R29, for use when the frame
// pointer does not need to be preserved.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);

// Pick the variant at runtime depending on -XX:+PreserveFramePointer.
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64-bit register classes: each covers one register's low
// word plus its virtual high half.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (includes the special registers
// r27-r31, unlike no_special_ptr_reg below)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers (excludes heapbase,
// thread, fp, lr and sp)
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers. Only the low 32-bit slot of each V
// register is listed (single-precision values).
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual "high halves" that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64 bit vector registers (uses the Vn/Vn_H slot pair of
// each register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
//
// n.b. each entry names all four 32-bit allocator slots (V, _H, _J,
// _K) making up the full 128-bit SIMD register
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slot pair is listed although the
// comment says 128 bit (vectorx_reg also lists _J/_K) -- confirm
// this is intentional for the instructions that use this class
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): only the V1/V1_H slot pair is listed although the
// comment says 128 bit -- confirm this matches the intended usage
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): only the V2/V2_H slot pair is listed although the
// comment says 128 bit -- confirm this matches the intended usage
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): only the V3/V3_H slot pair is listed although the
// comment says 128 bit -- confirm this matches the intended usage
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the sole RFLAGS register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls are ranked at twice the cost of a register op
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references are the most expensive non-branch operations
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
// Platform hooks queried by Compile::shorten_branches; this platform
// does not use call trampoline stubs so both hooks report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
// Emitters and size estimates for the exception and deopt handler
// stubs planted at the end of each compiled method (the emit_*
// definitions live elsewhere in this file).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  // count one adr and one far branch instruction
  // NOTE(review): 4 instruction slots are reserved although only two
  // instructions are named above -- presumably the far branch can
  // expand to multiple insns; confirm against MacroAssembler::far_jump
  static uint size_deopt_handler() {
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers
  //
  // these helpers and predicates are used by the adlc rule predicates
  // later in this file to recognise the ideal-graph 'signatures'
  // generated for volatile reads/writes and CAS operations (see the
  // long explanatory comment in the source block)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1066 %}
1067 
1068 source %{
1069 
1070   // Optimizaton of volatile gets and puts
1071   // -------------------------------------
1072   //
1073   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1074   // use to implement volatile reads and writes. For a volatile read
1075   // we simply need
1076   //
1077   //   ldar<x>
1078   //
1079   // and for a volatile write we need
1080   //
1081   //   stlr<x>
1082   // 
1083   // Alternatively, we can implement them by pairing a normal
1084   // load/store with a memory barrier. For a volatile read we need
1085   // 
1086   //   ldr<x>
1087   //   dmb ishld
1088   //
1089   // for a volatile write
1090   //
1091   //   dmb ish
1092   //   str<x>
1093   //   dmb ish
1094   //
1095   // We can also use ldaxr and stlxr to implement compare and swap CAS
1096   // sequences. These are normally translated to an instruction
1097   // sequence like the following
1098   //
1099   //   dmb      ish
1100   // retry:
1101   //   ldxr<x>   rval raddr
1102   //   cmp       rval rold
1103   //   b.ne done
1104   //   stlxr<x>  rval, rnew, rold
1105   //   cbnz      rval retry
1106   // done:
1107   //   cset      r0, eq
1108   //   dmb ishld
1109   //
1110   // Note that the exclusive store is already using an stlxr
1111   // instruction. That is required to ensure visibility to other
1112   // threads of the exclusive write (assuming it succeeds) before that
1113   // of any subsequent writes.
1114   //
1115   // The following instruction sequence is an improvement on the above
1116   //
1117   // retry:
1118   //   ldaxr<x>  rval raddr
1119   //   cmp       rval rold
1120   //   b.ne done
1121   //   stlxr<x>  rval, rnew, rold
1122   //   cbnz      rval retry
1123   // done:
1124   //   cset      r0, eq
1125   //
1126   // We don't need the leading dmb ish since the stlxr guarantees
1127   // visibility of prior writes in the case that the swap is
1128   // successful. Crucially we don't have to worry about the case where
1129   // the swap is not successful since no valid program should be
1130   // relying on visibility of prior changes by the attempting thread
1131   // in the case where the CAS fails.
1132   //
1133   // Similarly, we don't need the trailing dmb ishld if we substitute
1134   // an ldaxr instruction since that will provide all the guarantees we
1135   // require regarding observation of changes made by other threads
1136   // before any change to the CAS address observed by the load.
1137   //
1138   // In order to generate the desired instruction sequence we need to
1139   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1141   // writes or CAS operations and ii) do not occur through any other
1142   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1144   // sequences to the desired machine code sequences. Selection of the
1145   // alternative rules can be implemented by predicates which identify
1146   // the relevant node sequences.
1147   //
1148   // The ideal graph generator translates a volatile read to the node
1149   // sequence
1150   //
1151   //   LoadX[mo_acquire]
1152   //   MemBarAcquire
1153   //
1154   // As a special case when using the compressed oops optimization we
1155   // may also see this variant
1156   //
1157   //   LoadN[mo_acquire]
1158   //   DecodeN
1159   //   MemBarAcquire
1160   //
1161   // A volatile write is translated to the node sequence
1162   //
1163   //   MemBarRelease
1164   //   StoreX[mo_release] {CardMark}-optional
1165   //   MemBarVolatile
1166   //
1167   // n.b. the above node patterns are generated with a strict
1168   // 'signature' configuration of input and output dependencies (see
1169   // the predicates below for exact details). The card mark may be as
1170   // simple as a few extra nodes or, in a few GC configurations, may
1171   // include more complex control flow between the leading and
1172   // trailing memory barriers. However, whatever the card mark
1173   // configuration these signatures are unique to translated volatile
1174   // reads/stores -- they will not appear as a result of any other
1175   // bytecode translation or inlining nor as a consequence of
1176   // optimizing transforms.
1177   //
1178   // We also want to catch inlined unsafe volatile gets and puts and
1179   // be able to implement them using either ldar<x>/stlr<x> or some
1180   // combination of ldr<x>/stlr<x> and dmb instructions.
1181   //
1182   // Inlined unsafe volatiles puts manifest as a minor variant of the
1183   // normal volatile put node sequence containing an extra cpuorder
1184   // membar
1185   //
1186   //   MemBarRelease
1187   //   MemBarCPUOrder
1188   //   StoreX[mo_release] {CardMark}-optional
1189   //   MemBarVolatile
1190   //
1191   // n.b. as an aside, the cpuorder membar is not itself subject to
1192   // matching and translation by adlc rules.  However, the rule
1193   // predicates need to detect its presence in order to correctly
1194   // select the desired adlc rules.
1195   //
1196   // Inlined unsafe volatile gets manifest as a somewhat different
1197   // node sequence to a normal volatile get
1198   //
1199   //   MemBarCPUOrder
1200   //        ||       \\
1201   //   MemBarAcquire LoadX[mo_acquire]
1202   //        ||
1203   //   MemBarCPUOrder
1204   //
1205   // In this case the acquire membar does not directly depend on the
1206   // load. However, we can be sure that the load is generated from an
1207   // inlined unsafe volatile get if we see it dependent on this unique
1208   // sequence of membar nodes. Similarly, given an acquire membar we
1209   // can know that it was added because of an inlined unsafe volatile
1210   // get if it is fed and feeds a cpuorder membar and if its feed
1211   // membar also feeds an acquiring load.
1212   //
1213   // Finally an inlined (Unsafe) CAS operation is translated to the
1214   // following ideal graph
1215   //
1216   //   MemBarRelease
1217   //   MemBarCPUOrder
1218   //   CompareAndSwapX {CardMark}-optional
1219   //   MemBarCPUOrder
1220   //   MemBarAcquire
1221   //
1222   // So, where we can identify these volatile read and write
1223   // signatures we can choose to plant either of the above two code
1224   // sequences. For a volatile read we can simply plant a normal
1225   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1226   // also choose to inhibit translation of the MemBarAcquire and
1227   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1228   //
1229   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
1231   // normal str<x> and then a dmb ish for the MemBarVolatile.
1232   // Alternatively, we can inhibit translation of the MemBarRelease
1233   // and MemBarVolatile and instead plant a simple stlr<x>
1234   // instruction.
1235   //
1236   // when we recognise a CAS signature we can choose to plant a dmb
1237   // ish as a translation for the MemBarRelease, the conventional
1238   // macro-instruction sequence for the CompareAndSwap node (which
1239   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1240   // Alternatively, we can elide generation of the dmb instructions
1241   // and plant the alternative CompareAndSwap macro-instruction
1242   // sequence (which uses ldaxr<x>).
1243   // 
1244   // Of course, the above only applies when we see these signature
1245   // configurations. We still want to plant dmb instructions in any
1246   // other cases where we may see a MemBarAcquire, MemBarRelease or
1247   // MemBarVolatile. For example, at the end of a constructor which
1248   // writes final/volatile fields we will see a MemBarRelease
1249   // instruction and this needs a 'dmb ish' lest we risk the
1250   // constructed object being visible without making the
1251   // final/volatile field writes visible.
1252   //
1253   // n.b. the translation rules below which rely on detection of the
1254   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1255   // If we see anything other than the signature configurations we
1256   // always just translate the loads and stores to ldr<x> and str<x>
1257   // and translate acquire, release and volatile membars to the
1258   // relevant dmb instructions.
1259   //
1260 
1261   // graph traversal helpers used for volatile put/get and CAS
1262   // optimization
1263 
1264   // 1) general purpose helpers
1265 
1266   // if node n is linked to a parent MemBarNode by an intervening
1267   // Control and Memory ProjNode return the MemBarNode otherwise return
1268   // NULL.
1269   //
1270   // n may only be a Load or a MemBar.
1271 
1272   MemBarNode *parent_membar(const Node *n)
1273   {
1274     Node *ctl = NULL;
1275     Node *mem = NULL;
1276     Node *membar = NULL;
1277 
1278     if (n->is_Load()) {
1279       ctl = n->lookup(LoadNode::Control);
1280       mem = n->lookup(LoadNode::Memory);
1281     } else if (n->is_MemBar()) {
1282       ctl = n->lookup(TypeFunc::Control);
1283       mem = n->lookup(TypeFunc::Memory);
1284     } else {
1285         return NULL;
1286     }
1287 
1288     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1289       return NULL;
1290     }
1291 
1292     membar = ctl->lookup(0);
1293 
1294     if (!membar || !membar->is_MemBar()) {
1295       return NULL;
1296     }
1297 
1298     if (mem->lookup(0) != membar) {
1299       return NULL;
1300     }
1301 
1302     return membar->as_MemBar();
1303   }
1304 
1305   // if n is linked to a child MemBarNode by intervening Control and
1306   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1307 
1308   MemBarNode *child_membar(const MemBarNode *n)
1309   {
1310     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1311     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1312 
1313     // MemBar needs to have both a Ctl and Mem projection
1314     if (! ctl || ! mem)
1315       return NULL;
1316 
1317     MemBarNode *child = NULL;
1318     Node *x;
1319 
1320     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1321       x = ctl->fast_out(i);
1322       // if we see a membar we keep hold of it. we may also see a new
1323       // arena copy of the original but it will appear later
1324       if (x->is_MemBar()) {
1325           child = x->as_MemBar();
1326           break;
1327       }
1328     }
1329 
1330     if (child == NULL) {
1331       return NULL;
1332     }
1333 
1334     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1335       x = mem->fast_out(i);
1336       // if we see a membar we keep hold of it. we may also see a new
1337       // arena copy of the original but it will appear later
1338       if (x == child) {
1339         return child;
1340       }
1341     }
1342     return NULL;
1343   }
1344 
1345   // helper predicate use to filter candidates for a leading memory
1346   // barrier
1347   //
1348   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1349   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1350 
1351   bool leading_membar(const MemBarNode *barrier)
1352   {
1353     int opcode = barrier->Opcode();
1354     // if this is a release membar we are ok
1355     if (opcode == Op_MemBarRelease) {
1356       return true;
1357     }
1358     // if its a cpuorder membar . . .
1359     if (opcode != Op_MemBarCPUOrder) {
1360       return false;
1361     }
1362     // then the parent has to be a release membar
1363     MemBarNode *parent = parent_membar(barrier);
1364     if (!parent) {
1365       return false;
1366     }
1367     opcode = parent->Opcode();
1368     return opcode == Op_MemBarRelease;
1369   }
1370  
1371   // 2) card mark detection helper
1372 
1373   // helper predicate which can be used to detect a volatile membar
1374   // introduced as part of a conditional card mark sequence either by
1375   // G1 or by CMS when UseCondCardMark is true.
1376   //
1377   // membar can be definitively determined to be part of a card mark
1378   // sequence if and only if all the following hold
1379   //
1380   // i) it is a MemBarVolatile
1381   //
1382   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1383   // true
1384   //
1385   // iii) the node's Mem projection feeds a StoreCM node.
1386   
1387   bool is_card_mark_membar(const MemBarNode *barrier)
1388   {
1389     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1390       return false;
1391     }
1392 
1393     if (barrier->Opcode() != Op_MemBarVolatile) {
1394       return false;
1395     }
1396 
1397     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1398 
1399     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1400       Node *y = mem->fast_out(i);
1401       if (y->Opcode() == Op_StoreCM) {
1402         return true;
1403       }
1404     }
1405   
1406     return false;
1407   }
1408 
1409 
1410   // 3) helper predicates to traverse volatile put or CAS graphs which
1411   // may contain GC barrier subgraphs
1412 
1413   // Preamble
1414   // --------
1415   //
1416   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1418   // leading MemBarRelease and a trailing MemBarVolatile as follows
1419   //
1420   //   MemBarRelease
1421   //  {      ||      } -- optional
1422   //  {MemBarCPUOrder}
1423   //         ||     \\
1424   //         ||     StoreX[mo_release]
1425   //         | \     /
1426   //         | MergeMem
1427   //         | /
1428   //   MemBarVolatile
1429   //
1430   // where
1431   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1432   //  | \ and / indicate further routing of the Ctl and Mem feeds
1433   // 
1434   // this is the graph we see for non-object stores. however, for a
1435   // volatile Object store (StoreN/P) we may see other nodes below the
1436   // leading membar because of the need for a GC pre- or post-write
1437   // barrier.
1438   //
  // with most GC configurations we will see this simple variant which
1440   // includes a post-write barrier card mark.
1441   //
1442   //   MemBarRelease______________________________
1443   //         ||    \\               Ctl \        \\
1444   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1445   //         | \     /                       . . .  /
1446   //         | MergeMem
1447   //         | /
1448   //         ||      /
1449   //   MemBarVolatile
1450   //
1451   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1452   // the object address to an int used to compute the card offset) and
1453   // Ctl+Mem to a StoreB node (which does the actual card mark).
1454   //
1455   // n.b. a StoreCM node will only appear in this configuration when
1456   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1457   // because it implies a requirement to order visibility of the card
1458   // mark (StoreCM) relative to the object put (StoreP/N) using a
1459   // StoreStore memory barrier (arguably this ought to be represented
1460   // explicitly in the ideal graph but that is not how it works). This
1461   // ordering is required for both non-volatile and volatile
1462   // puts. Normally that means we need to translate a StoreCM using
1463   // the sequence
1464   //
1465   //   dmb ishst
1466   //   stlrb
1467   //
1468   // However, in the case of a volatile put if we can recognise this
1469   // configuration and plant an stlr for the object write then we can
1470   // omit the dmb and just plant an strb since visibility of the stlr
1471   // is ordered before visibility of subsequent stores. StoreCM nodes
1472   // also arise when using G1 or using CMS with conditional card
1473   // marking. In these cases (as we shall see) we don't need to insert
1474   // the dmb when translating StoreCM because there is already an
1475   // intervening StoreLoad barrier between it and the StoreP/N.
1476   //
1477   // It is also possible to perform the card mark conditionally on it
1478   // currently being unmarked in which case the volatile put graph
1479   // will look slightly different
1480   //
1481   //   MemBarRelease____________________________________________
1482   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1483   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1484   //         | \     /                              \            |
1485   //         | MergeMem                            . . .      StoreB
1486   //         | /                                                /
1487   //         ||     /
1488   //   MemBarVolatile
1489   //
1490   // It is worth noting at this stage that both the above
1491   // configurations can be uniquely identified by checking that the
1492   // memory flow includes the following subgraph:
1493   //
1494   //   MemBarRelease
1495   //  {MemBarCPUOrder}
1496   //          |  \      . . .
1497   //          |  StoreX[mo_release]  . . .
1498   //          |   /
1499   //         MergeMem
1500   //          |
1501   //   MemBarVolatile
1502   //
1503   // This is referred to as a *normal* subgraph. It can easily be
1504   // detected starting from any candidate MemBarRelease,
1505   // StoreX[mo_release] or MemBarVolatile.
1506   //
1507   // A simple variation on this normal case occurs for an unsafe CAS
1508   // operation. The basic graph for a non-object CAS is
1509   //
1510   //   MemBarRelease
1511   //         ||
1512   //   MemBarCPUOrder
1513   //         ||     \\   . . .
1514   //         ||     CompareAndSwapX
1515   //         ||       |
1516   //         ||     SCMemProj
1517   //         | \     /
1518   //         | MergeMem
1519   //         | /
1520   //   MemBarCPUOrder
1521   //         ||
1522   //   MemBarAcquire
1523   //
1524   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1527   // tail of the graph is a pair comprising a MemBarCPUOrder +
1528   // MemBarAcquire.
1529   //
1530   // So, in the case of a CAS the normal graph has the variant form
1531   //
1532   //   MemBarRelease
1533   //   MemBarCPUOrder
1534   //          |   \      . . .
1535   //          |  CompareAndSwapX  . . .
1536   //          |    |
1537   //          |   SCMemProj
1538   //          |   /  . . .
1539   //         MergeMem
1540   //          |
1541   //   MemBarCPUOrder
1542   //   MemBarAcquire
1543   //
1544   // This graph can also easily be detected starting from any
1545   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1546   //
1547   // the code below uses two helper predicates, leading_to_normal and
1548   // normal_to_leading to identify these normal graphs, one validating
1549   // the layout starting from the top membar and searching down and
1550   // the other validating the layout starting from the lower membar
1551   // and searching up.
1552   //
1553   // There are two special case GC configurations when a normal graph
1554   // may not be generated: when using G1 (which always employs a
1555   // conditional card mark); and when using CMS with conditional card
1556   // marking configured. These GCs are both concurrent rather than
1557   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1558   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1560   // object put and the corresponding conditional card mark. CMS
1561   // employs a post-write GC barrier while G1 employs both a pre- and
1562   // post-write GC barrier. Of course the extra nodes may be absent --
1563   // they are only inserted for object puts. This significantly
1564   // complicates the task of identifying whether a MemBarRelease,
1565   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1566   // when using these GC configurations (see below). It adds similar
1567   // complexity to the task of identifying whether a MemBarRelease,
1568   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1569   //
1570   // In both cases the post-write subtree includes an auxiliary
1571   // MemBarVolatile (StoreLoad barrier) separating the object put and
1572   // the read of the corresponding card. This poses two additional
1573   // problems.
1574   //
1575   // Firstly, a card mark MemBarVolatile needs to be distinguished
1576   // from a normal trailing MemBarVolatile. Resolving this first
1577   // problem is straightforward: a card mark MemBarVolatile always
1578   // projects a Mem feed to a StoreCM node and that is a unique marker
1579   //
1580   //      MemBarVolatile (card mark)
1581   //       C |    \     . . .
1582   //         |   StoreCM   . . .
1583   //       . . .
1584   //
1585   // The second problem is how the code generator is to translate the
1586   // card mark barrier? It always needs to be translated to a "dmb
1587   // ish" instruction whether or not it occurs as part of a volatile
1588   // put. A StoreLoad barrier is needed after the object put to ensure
1589   // i) visibility to GC threads of the object put and ii) visibility
1590   // to the mutator thread of any card clearing write by a GC
1591   // thread. Clearly a normal store (str) will not guarantee this
1592   // ordering but neither will a releasing store (stlr). The latter
1593   // guarantees that the object put is visible but does not guarantee
1594   // that writes by other threads have also been observed.
1595   // 
1596   // So, returning to the task of translating the object put and the
1597   // leading/trailing membar nodes: what do the non-normal node graph
1598   // look like for these 2 special cases? and how can we determine the
1599   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1600   // in both normal and non-normal cases?
1601   //
1602   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1604   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1605   // intervening StoreLoad barrier (MemBarVolatile).
1606   //
1607   // So, with CMS we may see a node graph for a volatile object store
1608   // which looks like this
1609   //
1610   //   MemBarRelease
1611   //   MemBarCPUOrder_(leading)__________________
1612   //     C |    M \       \\                   C \
1613   //       |       \    StoreN/P[mo_release]  CastP2X
1614   //       |    Bot \    /
1615   //       |       MergeMem
1616   //       |         /
1617   //      MemBarVolatile (card mark)
1618   //     C |  ||    M |
1619   //       | LoadB    |
1620   //       |   |      |
1621   //       | Cmp      |\
1622   //       | /        | \
1623   //       If         |  \
1624   //       | \        |   \
1625   // IfFalse  IfTrue  |    \
1626   //       \     / \  |     \
1627   //        \   / StoreCM    |
1628   //         \ /      |      |
1629   //        Region   . . .   |
1630   //          | \           /
1631   //          |  . . .  \  / Bot
1632   //          |       MergeMem
1633   //          |          |
1634   //        MemBarVolatile (trailing)
1635   //
1636   // The first MergeMem merges the AliasIdxBot Mem slice from the
1637   // leading membar and the oopptr Mem slice from the Store into the
1638   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1639   // Mem slice from the card mark membar and the AliasIdxRaw slice
1640   // from the StoreCM into the trailing membar (n.b. the latter
1641   // proceeds via a Phi associated with the If region).
1642   //
1643   // The graph for a CAS varies slightly, the obvious difference being
1644   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1645   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1646   // MemBarAcquire pair. The other important difference is that the
1647   // CompareAndSwap node's SCMemProj is not merged into the card mark
1648   // membar - it still feeds the trailing MergeMem. This also means
1649   // that the card mark membar receives its Mem feed directly from the
1650   // leading membar rather than via a MergeMem.
1651   //
1652   //   MemBarRelease
1653   //   MemBarCPUOrder__(leading)_________________________
1654   //       ||                       \\                 C \
1655   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1656   //     C |  ||    M |              |
1657   //       | LoadB    |       ______/|
1658   //       |   |      |      /       |
1659   //       | Cmp      |     /      SCMemProj
1660   //       | /        |    /         |
1661   //       If         |   /         /
1662   //       | \        |  /         /
1663   // IfFalse  IfTrue  | /         /
1664   //       \     / \  |/ prec    /
1665   //        \   / StoreCM       /
1666   //         \ /      |        /
1667   //        Region   . . .    /
1668   //          | \            /
1669   //          |  . . .  \   / Bot
1670   //          |       MergeMem
1671   //          |          |
1672   //        MemBarCPUOrder
1673   //        MemBarAcquire (trailing)
1674   //
1675   // This has a slightly different memory subgraph to the one seen
1676   // previously but the core of it is the same as for the CAS normal
  // subgraph
1678   //
1679   //   MemBarRelease
1680   //   MemBarCPUOrder____
1681   //      ||             \      . . .
1682   //   MemBarVolatile  CompareAndSwapX  . . .
1683   //      |  \            |
1684   //        . . .   SCMemProj
1685   //          |     /  . . .
1686   //         MergeMem
1687   //          |
1688   //   MemBarCPUOrder
1689   //   MemBarAcquire
1690   //
1691   //
1692   // G1 is quite a lot more complicated. The nodes inserted on behalf
1693   // of G1 may comprise: a pre-write graph which adds the old value to
1694   // the SATB queue; the releasing store itself; and, finally, a
1695   // post-write graph which performs a card mark.
1696   //
1697   // The pre-write graph may be omitted, but only when the put is
1698   // writing to a newly allocated (young gen) object and then only if
1699   // there is a direct memory chain to the Initialize node for the
1700   // object allocation. This will not happen for a volatile put since
1701   // any memory chain passes through the leading membar.
1702   //
1703   // The pre-write graph includes a series of 3 If tests. The outermost
1704   // If tests whether SATB is enabled (no else case). The next If tests
1705   // whether the old value is non-NULL (no else case). The third tests
1706   // whether the SATB queue index is > 0, if so updating the queue. The
1707   // else case for this third If calls out to the runtime to allocate a
1708   // new queue buffer.
1709   //
1710   // So with G1 the pre-write and releasing store subgraph looks like
1711   // this (the nested Ifs are omitted).
1712   //
1713   //  MemBarRelease (leading)____________
1714   //     C |  ||  M \   M \    M \  M \ . . .
1715   //       | LoadB   \  LoadL  LoadN   \
1716   //       | /        \                 \
1717   //       If         |\                 \
1718   //       | \        | \                 \
1719   //  IfFalse  IfTrue |  \                 \
1720   //       |     |    |   \                 |
1721   //       |     If   |   /\                |
1722   //       |     |          \               |
1723   //       |                 \              |
1724   //       |    . . .         \             |
1725   //       | /       | /       |            |
1726   //      Region  Phi[M]       |            |
1727   //       | \       |         |            |
1728   //       |  \_____ | ___     |            |
1729   //     C | C \     |   C \ M |            |
1730   //       | CastP2X | StoreN/P[mo_release] |
1731   //       |         |         |            |
1732   //     C |       M |       M |          M |
1733   //        \        |         |           /
1734   //                  . . . 
1735   //          (post write subtree elided)
1736   //                    . . .
1737   //             C \         M /
1738   //         MemBarVolatile (trailing)
1739   //
1740   // n.b. the LoadB in this subgraph is not the card read -- it's a
1741   // read of the SATB queue active flag.
1742   //
1743   // Once again the CAS graph is a minor variant on the above with the
  // expected substitutions of CompareAndSwapX for StoreN/P and
1745   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1746   //
1747   // The G1 post-write subtree is also optional, this time when the
1748   // new value being written is either null or can be identified as a
1749   // newly allocated (young gen) object with no intervening control
  // flow. The latter cannot happen but the former may, in which case
  // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged direct into the
1753   // trailing membar as per the normal subgraph. So, the only special
1754   // case which arises is when the post-write subgraph is generated.
1755   //
1756   // The kernel of the post-write G1 subgraph is the card mark itself
1757   // which includes a card mark memory barrier (MemBarVolatile), a
1758   // card test (LoadB), and a conditional update (If feeding a
1759   // StoreCM). These nodes are surrounded by a series of nested Ifs
1760   // which try to avoid doing the card mark. The top level If skips if
1761   // the object reference does not cross regions (i.e. it tests if
1762   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1763   // need not be recorded. The next If, which skips on a NULL value,
1764   // may be absent (it is not generated if the type of value is >=
1765   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1766   // checking if card_val != young).  n.b. although this test requires
1767   // a pre-read of the card it can safely be done before the StoreLoad
1768   // barrier. However that does not bypass the need to reread the card
1769   // after the barrier.
1770   //
1771   //                (pre-write subtree elided)
1772   //        . . .                  . . .    . . .  . . .
1773   //        C |                    M |     M |    M |
1774   //       Region                  Phi[M] StoreN    |
1775   //          |                     / \      |      |
1776   //         / \_______            /   \     |      |
1777   //      C / C \      . . .            \    |      |
1778   //       If   CastP2X . . .            |   |      |
1779   //       / \                           |   |      |
1780   //      /   \                          |   |      |
1781   // IfFalse IfTrue                      |   |      |
1782   //   |       |                         |   |     /|
1783   //   |       If                        |   |    / |
1784   //   |      / \                        |   |   /  |
1785   //   |     /   \                        \  |  /   |
1786   //   | IfFalse IfTrue                   MergeMem  |
1787   //   |  . . .    / \                       /      |
1788   //   |          /   \                     /       |
1789   //   |     IfFalse IfTrue                /        |
1790   //   |      . . .    |                  /         |
1791   //   |               If                /          |
1792   //   |               / \              /           |
1793   //   |              /   \            /            |
1794   //   |         IfFalse IfTrue       /             |
1795   //   |           . . .   |         /              |
1796   //   |                    \       /               |
1797   //   |                     \     /                |
1798   //   |             MemBarVolatile__(card mark)    |
1799   //   |                ||   C |  M \  M \          |
1800   //   |               LoadB   If    |    |         |
1801   //   |                      / \    |    |         |
1802   //   |                     . . .   |    |         |
1803   //   |                          \  |    |        /
1804   //   |                        StoreCM   |       /
1805   //   |                          . . .   |      /
1806   //   |                        _________/      /
1807   //   |                       /  _____________/
1808   //   |   . . .       . . .  |  /            /
1809   //   |    |                 | /   _________/
1810   //   |    |               Phi[M] /        /
1811   //   |    |                 |   /        /
1812   //   |    |                 |  /        /
1813   //   |  Region  . . .     Phi[M]  _____/
1814   //   |    /                 |    /
1815   //   |                      |   /   
1816   //   | . . .   . . .        |  /
1817   //   | /                    | /
1818   // Region           |  |  Phi[M]
1819   //   |              |  |  / Bot
1820   //    \            MergeMem 
1821   //     \            /
1822   //     MemBarVolatile
1823   //
1824   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1825   // from the leading membar and the oopptr Mem slice from the Store
1826   // into the card mark membar i.e. the memory flow to the card mark
1827   // membar still looks like a normal graph.
1828   //
1829   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1830   // Mem slices (from the StoreCM and other card mark queue stores).
1831   // However in this case the AliasIdxBot Mem slice does not come
1832   // direct from the card mark membar. It is merged through a series
1833   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1834   // from the leading membar with the Mem feed from the card mark
1835   // membar. Each Phi corresponds to one of the Ifs which may skip
1836   // around the card mark membar. So when the If implementing the NULL
1837   // value check has been elided the total number of Phis is 2
1838   // otherwise it is 3.
1839   //
1840   // The CAS graph when using G1GC also includes a pre-write subgraph
  // and an optional post-write subgraph. The same variations are
  // introduced as for CMS with conditional card marking i.e. the
  // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
1844   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1845   // Mem feed from the CompareAndSwapP/N includes a precedence
1846   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1847   // trailing membar. So, as before the configuration includes the
1848   // normal CAS graph as a subgraph of the memory flow.
1849   //
1850   // So, the upshot is that in all cases the volatile put graph will
  // include a *normal* memory subgraph between the leading membar and
1852   // its child membar, either a volatile put graph (including a
1853   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1854   // When that child is not a card mark membar then it marks the end
1855   // of the volatile put or CAS subgraph. If the child is a card mark
1856   // membar then the normal subgraph will form part of a volatile put
1857   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1858   // to a trailing barrier via a MergeMem. That feed is either direct
1859   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1860   // memory flow (for G1).
1861   // 
1862   // The predicates controlling generation of instructions for store
1863   // and barrier nodes employ a few simple helper functions (described
1864   // below) which identify the presence or absence of all these
1865   // subgraph configurations and provide a means of traversing from
1866   // one node in the subgraph to another.
1867 
1868   // is_CAS(int opcode)
1869   //
1870   // return true if opcode is one of the possible CompareAndSwapX
1871   // values otherwise false.
1872 
1873   bool is_CAS(int opcode)
1874   {
1875     return (opcode == Op_CompareAndSwapI ||
1876             opcode == Op_CompareAndSwapL ||
1877             opcode == Op_CompareAndSwapN ||
1878             opcode == Op_CompareAndSwapP);
1879   }
1880 
1881   // leading_to_normal
1882   //
  // graph traversal helper which detects the normal case Mem feed from
1884   // a release membar (or, optionally, its cpuorder child) to a
1885   // dependent volatile membar i.e. it ensures that one or other of
1886   // the following Mem flow subgraph is present.
1887   //
1888   //   MemBarRelease
1889   //   MemBarCPUOrder {leading}
1890   //          |  \      . . .
1891   //          |  StoreN/P[mo_release]  . . .
1892   //          |   /
1893   //         MergeMem
1894   //          |
1895   //   MemBarVolatile {trailing or card mark}
1896   //
1897   //   MemBarRelease
1898   //   MemBarCPUOrder {leading}
1899   //      |       \      . . .
1900   //      |     CompareAndSwapX  . . .
1901   //               |
1902   //     . . .    SCMemProj
1903   //           \   |
1904   //      |    MergeMem
1905   //      |       /
1906   //    MemBarCPUOrder
1907   //    MemBarAcquire {trailing}
1908   //
1909   // if the correct configuration is present returns the trailing
1910   // membar otherwise NULL.
1911   //
1912   // the input membar is expected to be either a cpuorder membar or a
1913   // release membar. in the latter case it should not have a cpu membar
1914   // child.
1915   //
1916   // the returned value may be a card mark or trailing membar
1917   //
1918 
  MemBarNode *leading_to_normal(MemBarNode *leading)
  {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow -- all candidate users hang off the leading
    // membar's Memory projection
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    // no Memory projection means there is no subgraph to match
    if (!mem) {
      return NULL;
    }

    Node *x = NULL;
    StoreNode * st = NULL;       // candidate releasing store
    LoadStoreNode *cas = NULL;   // candidate CompareAndSwapX
    MergeMemNode *mm = NULL;     // candidate MergeMem fed by this membar

    // scan the users of the Memory projection: the normal subgraph has
    // at most one MergeMem and exactly one releasing store or CAS
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        if (mm != NULL) {
          return NULL;
        }
        // two merge mems is one too many
        mm = x->as_MergeMem();
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas) {
      return NULL;
    }

    // must have a merge if we also have st
    // n.b. in the CAS case the MergeMem is reached via the SCMemProj
    // below, so it need not be a direct user of the membar's Mem proj
    if (st && !mm) {
      return NULL;
    }

    Node *y = NULL;
    if (cas) {
      // look for an SCMemProj
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->is_Proj()) {
          y = x;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      // the proj must feed a MergeMem
      for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
        x = y->fast_out(i);
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm == NULL)
        return NULL;
    } else {
      // ensure the store feeds the existing mergemem;
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          y = st;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
    }

    MemBarNode *mbar = NULL;
    // ensure the merge feeds to the expected type of membar
    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
      x = mm->fast_out(i);
      if (x->is_MemBar()) {
        int opcode = x->Opcode();
        if (opcode == Op_MemBarVolatile && st) {
          // store case: MergeMem feeds a trailing (or card mark) volatile membar
          mbar = x->as_MemBar();
        } else if (cas && opcode == Op_MemBarCPUOrder) {
          // CAS case: MergeMem must feed a MemBarCPUOrder + MemBarAcquire pair;
          // return the acquire membar, which terminates the subgraph
          MemBarNode *y =  x->as_MemBar();
          y = child_membar(y);
          if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
            mbar = y;
          }
        }
        break;
      }
    }

    return mbar;
  }
2026 
2027   // normal_to_leading
2028   //
2029   // graph traversal helper which detects the normal case Mem feed
2030   // from either a card mark or a trailing membar to a preceding
2031   // release membar (optionally its cpuorder child) i.e. it ensures
2032   // that one or other of the following Mem flow subgraphs is present.
2033   //
2034   //   MemBarRelease
2035   //   MemBarCPUOrder {leading}
2036   //          |  \      . . .
2037   //          |  StoreN/P[mo_release]  . . .
2038   //          |   /
2039   //         MergeMem
2040   //          |
2041   //   MemBarVolatile {card mark or trailing}
2042   //
2043   //   MemBarRelease
2044   //   MemBarCPUOrder {leading}
2045   //      |       \      . . .
2046   //      |     CompareAndSwapX  . . .
2047   //               |
2048   //     . . .    SCMemProj
2049   //           \   |
2050   //      |    MergeMem
2051   //      |        /
2052   //    MemBarCPUOrder
2053   //    MemBarAcquire {trailing}
2054   //
2055   // this predicate checks for the same flow as the previous predicate
2056   // but starting from the bottom rather than the top.
2057   //
  // if the configuration is present returns the cpuorder membar for
2059   // preference or when absent the release membar otherwise NULL.
2060   //
2061   // n.b. the input membar is expected to be a MemBarVolatile but
2062   // need not be a card mark membar.
2063 
2064   MemBarNode *normal_to_leading(const MemBarNode *barrier)
2065   {
2066     // input must be a volatile membar
2067     assert((barrier->Opcode() == Op_MemBarVolatile ||
2068             barrier->Opcode() == Op_MemBarAcquire),
2069            "expecting a volatile or an acquire membar");
2070     Node *x;
2071     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2072 
2073     // if we have an acquire membar then it must be fed via a CPUOrder
2074     // membar
2075 
2076     if (is_cas) {
2077       // skip to parent barrier which must be a cpuorder
2078       x = parent_membar(barrier);
2079       if (x->Opcode() != Op_MemBarCPUOrder)
2080         return NULL;
2081     } else {
2082       // start from the supplied barrier
2083       x = (Node *)barrier;
2084     }
2085 
2086     // the Mem feed to the membar should be a merge
2087     x = x ->in(TypeFunc::Memory);
2088     if (!x->is_MergeMem())
2089       return NULL;
2090 
2091     MergeMemNode *mm = x->as_MergeMem();
2092 
2093     if (is_cas) {
2094       // the merge should be fed from the CAS via an SCMemProj node
2095       x = NULL;
2096       for (uint idx = 1; idx < mm->req(); idx++) {
2097         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2098           x = mm->in(idx);
2099           break;
2100         }
2101       }
2102       if (x == NULL) {
2103         return NULL;
2104       }
2105       // check for a CAS feeding this proj
2106       x = x->in(0);
2107       int opcode = x->Opcode();
2108       if (!is_CAS(opcode)) {
2109         return NULL;
2110       }
2111       // the CAS should get its mem feed from the leading membar
2112       x = x->in(MemNode::Memory);
2113     } else {
2114       // the merge should get its Bottom mem feed from the leading membar
2115       x = mm->in(Compile::AliasIdxBot);      
2116     } 
2117 
2118     // ensure this is a non control projection
2119     if (!x->is_Proj() || x->is_CFG()) {
2120       return NULL;
2121     }
2122     // if it is fed by a membar that's the one we want
2123     x = x->in(0);
2124 
2125     if (!x->is_MemBar()) {
2126       return NULL;
2127     }
2128 
2129     MemBarNode *leading = x->as_MemBar();
2130     // reject invalid candidates
2131     if (!leading_membar(leading)) {
2132       return NULL;
2133     }
2134 
2135     // ok, we have a leading membar, now for the sanity clauses
2136 
2137     // the leading membar must feed Mem to a releasing store or CAS
2138     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2139     StoreNode *st = NULL;
2140     LoadStoreNode *cas = NULL;
2141     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2142       x = mem->fast_out(i);
2143       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2144         // two stores or CASes is one too many
2145         if (st != NULL || cas != NULL) {
2146           return NULL;
2147         }
2148         st = x->as_Store();
2149       } else if (is_CAS(x->Opcode())) {
2150         if (st != NULL || cas != NULL) {
2151           return NULL;
2152         }
2153         cas = x->as_LoadStore();
2154       }
2155     }
2156 
2157     // we should not have both a store and a cas
2158     if (st == NULL & cas == NULL) {
2159       return NULL;
2160     }
2161 
2162     if (st == NULL) {
2163       // nothing more to check
2164       return leading;
2165     } else {
2166       // we should not have a store if we started from an acquire
2167       if (is_cas) {
2168         return NULL;
2169       }
2170 
2171       // the store should feed the merge we used to get here
2172       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2173         if (st->fast_out(i) == mm) {
2174           return leading;
2175         }
2176       }
2177     }
2178 
2179     return NULL;
2180   }
2181 
2182   // card_mark_to_trailing
2183   //
2184   // graph traversal helper which detects extra, non-normal Mem feed
2185   // from a card mark volatile membar to a trailing membar i.e. it
2186   // ensures that one of the following three GC post-write Mem flow
2187   // subgraphs is present.
2188   //
2189   // 1)
2190   //     . . .
2191   //       |
2192   //   MemBarVolatile (card mark)
2193   //      |          |     
2194   //      |        StoreCM
2195   //      |          |
2196   //      |        . . .
2197   //  Bot |  / 
2198   //   MergeMem 
2199   //      |
2200   //      |
2201   //    MemBarVolatile {trailing}
2202   //
2203   // 2)
2204   //   MemBarRelease/CPUOrder (leading)
2205   //    |
2206   //    | 
2207   //    |\       . . .
2208   //    | \        | 
2209   //    |  \  MemBarVolatile (card mark) 
2210   //    |   \   |     |
2211   //     \   \  |   StoreCM    . . .
2212   //      \   \ |
2213   //       \  Phi
2214   //        \ /
2215   //        Phi  . . .
2216   //     Bot |   /
2217   //       MergeMem
2218   //         |
2219   //    MemBarVolatile {trailing}
2220   //
2221   //
2222   // 3)
2223   //   MemBarRelease/CPUOrder (leading)
2224   //    |
2225   //    |\
2226   //    | \
2227   //    |  \      . . .
2228   //    |   \       |
2229   //    |\   \  MemBarVolatile (card mark)
2230   //    | \   \   |     |
2231   //    |  \   \  |   StoreCM    . . .
2232   //    |   \   \ |
2233   //     \   \  Phi
2234   //      \   \ /  
2235   //       \  Phi
2236   //        \ /
2237   //        Phi  . . .
2238   //     Bot |   /
2239   //       MergeMem
2240   //         |
2241   //         |
2242   //    MemBarVolatile {trailing}
2243   //
2244   // configuration 1 is only valid if UseConcMarkSweepGC &&
2245   // UseCondCardMark
2246   //
2247   // configurations 2 and 3 are only valid if UseG1GC.
2248   //
2249   // if a valid configuration is present returns the trailing membar
2250   // otherwise NULL.
2251   //
2252   // n.b. the supplied membar is expected to be a card mark
2253   // MemBarVolatile i.e. the caller must ensure the input node has the
2254   // correct operand and feeds Mem to a StoreCM node
2255 
2256   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2257   {
2258     // input must be a card mark volatile membar
2259     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2260 
2261     Node *feed = barrier->proj_out(TypeFunc::Memory);
2262     Node *x;
2263     MergeMemNode *mm = NULL;
2264 
2265     const int MAX_PHIS = 3;     // max phis we will search through
2266     int phicount = 0;           // current search count
2267 
2268     bool retry_feed = true;
2269     while (retry_feed) {
2270       // see if we have a direct MergeMem feed
2271       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2272         x = feed->fast_out(i);
2273         // the correct Phi will be merging a Bot memory slice
2274         if (x->is_MergeMem()) {
2275           mm = x->as_MergeMem();
2276           break;
2277         }
2278       }
2279       if (mm) {
2280         retry_feed = false;
2281       } else if (UseG1GC & phicount++ < MAX_PHIS) {
2282         // the barrier may feed indirectly via one or two Phi nodes
2283         PhiNode *phi = NULL;
2284         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2285           x = feed->fast_out(i);
2286           // the correct Phi will be merging a Bot memory slice
2287           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2288             phi = x->as_Phi();
2289             break;
2290           }
2291         }
2292         if (!phi) {
2293           return NULL;
2294         }
2295         // look for another merge below this phi
2296         feed = phi;
2297       } else {
2298         // couldn't find a merge
2299         return NULL;
2300       }
2301     }
2302 
2303     // sanity check this feed turns up as the expected slice
2304     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2305 
2306     MemBarNode *trailing = NULL;
2307     // be sure we have a trailing membar the merge
2308     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2309       x = mm->fast_out(i);
2310       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2311         trailing = x->as_MemBar();
2312         break;
2313       }
2314     }
2315 
2316     return trailing;
2317   }
2318 
2319   // trailing_to_card_mark
2320   //
2321   // graph traversal helper which detects extra, non-normal Mem feed
2322   // from a trailing volatile membar to a preceding card mark volatile
2323   // membar i.e. it identifies whether one of the three possible extra
2324   // GC post-write Mem flow subgraphs is present
2325   //
2326   // this predicate checks for the same flow as the previous predicate
2327   // but starting from the bottom rather than the top.
2328   //
2329   // if the configuration is present returns the card mark membar
2330   // otherwise NULL
2331   //
2332   // n.b. the supplied membar is expected to be a trailing
2333   // MemBarVolatile i.e. the caller must ensure the input node has the
2334   // correct opcode
2335 
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile,
           "expecting a volatile membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    // the Mem feed to the membar should be a merge
    Node *x = trailing->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    // walk up the AliasIdxBot slice of the merge
    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // if the Bot slice is already a Proj we can skip the phi search
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;        // candidate Mem proj from the card mark membar
        PhiNode *nextphi = NULL;      // next phi in the chain, if any
        bool found_leading = false;   // saw a feed from a leading membar
        // n.b. x is reused as a scan cursor here; its value on loop exit
        // is irrelevant since it is overwritten before the next test
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        // not a G1 phi chain -- no other indirect feed is acceptable
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    // reject a volatile membar which is not actually a card mark
    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2412 
2413   // trailing_to_leading
2414   //
2415   // graph traversal helper which checks the Mem flow up the graph
2416   // from a (non-card mark) trailing membar attempting to locate and
2417   // return an associated leading membar. it first looks for a
2418   // subgraph in the normal configuration (relying on helper
2419   // normal_to_leading). failing that it then looks for one of the
2420   // possible post-write card mark subgraphs linking the trailing node
  // to the card mark membar (relying on helper
2422   // trailing_to_card_mark), and then checks that the card mark membar
2423   // is fed by a leading membar (once again relying on auxiliary
2424   // predicate normal_to_leading).
2425   //
  // if the configuration is valid returns the cpuorder membar for
2427   // preference or when absent the release membar otherwise NULL.
2428   //
2429   // n.b. the input membar is expected to be either a volatile or
2430   // acquire membar but in the former case must *not* be a card mark
2431   // membar.
2432 
2433   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2434   {
2435     assert((trailing->Opcode() == Op_MemBarAcquire ||
2436             trailing->Opcode() == Op_MemBarVolatile),
2437            "expecting an acquire or volatile membar");
2438     assert((trailing->Opcode() != Op_MemBarVolatile ||
2439             !is_card_mark_membar(trailing)),
2440            "not expecting a card mark membar");
2441 
2442     MemBarNode *leading = normal_to_leading(trailing);
2443 
2444     if (leading) {
2445       return leading;
2446     }
2447 
2448     // nothing more to do if this is an acquire
2449     if (trailing->Opcode() == Op_MemBarAcquire) {
2450       return NULL;
2451     }
2452 
2453     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2454 
2455     if (!card_mark_membar) {
2456       return NULL;
2457     }
2458 
2459     return normal_to_leading(card_mark_membar);
2460   }
2461 
2462   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2463 
// Returns true when the dmb normally planted for this MemBarAcquire
// can be elided because the associated load will instead be emitted
// as an ldar<x> (see needs_acquiring_load). That holds when the
// membar directly consumes a LoadX[mo_acquire], when it sits inside
// the CPUOrder/Acquire/CPUOrder sandwich generated for an inlined
// unsafe volatile get, or when it trails a CAS subgraph.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS
  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2571 
2572 bool needs_acquiring_load(const Node *n)
2573 {
2574   assert(n->is_Load(), "expecting a load");
2575   if (UseBarriersForVolatile) {
2576     // we use a normal load and a dmb
2577     return false;
2578   }
2579 
2580   LoadNode *ld = n->as_Load();
2581 
2582   if (!ld->is_acquire()) {
2583     return false;
2584   }
2585 
2586   // check if this load is feeding an acquire membar
2587   //
2588   //   LoadX[mo_acquire]
2589   //   {  |1   }
2590   //   {DecodeN}
2591   //      |Parms
2592   //   MemBarAcquire*
2593   //
2594   // where * tags node we were passed
2595   // and |k means input k
2596 
2597   Node *start = ld;
2598   Node *mbacq = NULL;
2599 
2600   // if we hit a DecodeNarrowPtr we reset the start node and restart
2601   // the search through the outputs
2602  restart:
2603 
2604   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2605     Node *x = start->fast_out(i);
2606     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2607       mbacq = x;
2608     } else if (!mbacq &&
2609                (x->is_DecodeNarrowPtr() ||
2610                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2611       start = x;
2612       goto restart;
2613     }
2614   }
2615 
2616   if (mbacq) {
2617     return true;
2618   }
2619 
2620   // now check for an unsafe volatile get
2621 
2622   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2623   //
2624   //     MemBarCPUOrder
2625   //        ||       \\
2626   //   MemBarAcquire* LoadX[mo_acquire]
2627   //        ||
2628   //   MemBarCPUOrder
2629 
2630   MemBarNode *membar;
2631 
2632   membar = parent_membar(ld);
2633 
2634   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2635     return false;
2636   }
2637 
2638   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2639 
2640   membar = child_membar(membar);
2641 
2642   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2643     return false;
2644   }
2645 
2646   membar = child_membar(membar);
2647   
2648   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2649     return false;
2650   }
2651 
2652   return true;
2653 }
2654 
2655 bool unnecessary_release(const Node *n)
2656 {
2657   assert((n->is_MemBar() &&
2658           n->Opcode() == Op_MemBarRelease),
2659          "expecting a release membar");
2660 
2661   if (UseBarriersForVolatile) {
2662     // we need to plant a dmb
2663     return false;
2664   }
2665 
2666   // if there is a dependent CPUOrder barrier then use that as the
2667   // leading
2668 
2669   MemBarNode *barrier = n->as_MemBar();
2670   // check for an intervening cpuorder membar
2671   MemBarNode *b = child_membar(barrier);
2672   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2673     // ok, so start the check from the dependent cpuorder barrier
2674     barrier = b;
2675   }
2676 
2677   // must start with a normal feed
2678   MemBarNode *child_barrier = leading_to_normal(barrier);
2679 
2680   if (!child_barrier) {
2681     return false;
2682   }
2683 
2684   if (!is_card_mark_membar(child_barrier)) {
2685     // this is the trailing membar and we are done
2686     return true;
2687   }
2688 
2689   // must be sure this card mark feeds a trailing membar
2690   MemBarNode *trailing = card_mark_to_trailing(child_barrier);
2691   return (trailing != NULL);
2692 }
2693 
2694 bool unnecessary_volatile(const Node *n)
2695 {
2696   // assert n->is_MemBar();
2697   if (UseBarriersForVolatile) {
2698     // we need to plant a dmb
2699     return false;
2700   }
2701 
2702   MemBarNode *mbvol = n->as_MemBar();
2703 
2704   // first we check if this is part of a card mark. if so then we have
2705   // to generate a StoreLoad barrier
2706   
2707   if (is_card_mark_membar(mbvol)) {
2708       return false;
2709   }
2710 
2711   // ok, if it's not a card mark then we still need to check if it is
2712   // a trailing membar of a volatile put hgraph.
2713 
2714   return (trailing_to_leading(mbvol) != NULL);
2715 }
2716 
2717 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2718 
2719 bool needs_releasing_store(const Node *n)
2720 {
2721   // assert n->is_Store();
2722   if (UseBarriersForVolatile) {
2723     // we use a normal store and dmb combination
2724     return false;
2725   }
2726 
2727   StoreNode *st = n->as_Store();
2728 
2729   // the store must be marked as releasing
2730   if (!st->is_release()) {
2731     return false;
2732   }
2733 
2734   // the store must be fed by a membar
2735 
2736   Node *x = st->lookup(StoreNode::Memory);
2737 
2738   if (! x || !x->is_Proj()) {
2739     return false;
2740   }
2741 
2742   ProjNode *proj = x->as_Proj();
2743 
2744   x = proj->lookup(0);
2745 
2746   if (!x || !x->is_MemBar()) {
2747     return false;
2748   }
2749 
2750   MemBarNode *barrier = x->as_MemBar();
2751 
2752   // if the barrier is a release membar or a cpuorder mmebar fed by a
2753   // release membar then we need to check whether that forms part of a
2754   // volatile put graph.
2755 
2756   // reject invalid candidates
2757   if (!leading_membar(barrier)) {
2758     return false;
2759   }
2760 
2761   // does this lead a normal subgraph?
2762   MemBarNode *mbvol = leading_to_normal(barrier);
2763 
2764   if (!mbvol) {
2765     return false;
2766   }
2767 
2768   // all done unless this is a card mark
2769   if (!is_card_mark_membar(mbvol)) {
2770     return true;
2771   }
2772   
2773   // we found a card mark -- just make sure we have a trailing barrier
2774 
2775   return (card_mark_to_trailing(mbvol) != NULL);
2776 }
2777 
2778 // predicate controlling translation of CAS
2779 //
2780 // returns true if CAS needs to use an acquiring load otherwise false
2781 
2782 bool needs_acquiring_load_exclusive(const Node *n)
2783 {
2784   assert(is_CAS(n->Opcode()), "expecting a compare and swap");
2785   if (UseBarriersForVolatile) {
2786     return false;
2787   }
2788 
2789   // CAS nodes only ought to turn up in inlined unsafe CAS operations
2790 #ifdef ASSERT
2791   LoadStoreNode *st = n->as_LoadStore();
2792 
2793   // the store must be fed by a membar
2794 
2795   Node *x = st->lookup(StoreNode::Memory);
2796 
2797   assert (x && x->is_Proj(), "CAS not fed by memory proj!");
2798 
2799   ProjNode *proj = x->as_Proj();
2800 
2801   x = proj->lookup(0);
2802 
2803   assert (x && x->is_MemBar(), "CAS not fed by membar!");
2804 
2805   MemBarNode *barrier = x->as_MemBar();
2806 
2807   // the barrier must be a cpuorder mmebar fed by a release membar
2808 
2809   assert(barrier->Opcode() == Op_MemBarCPUOrder,
2810          "CAS not fed by cpuorder membar!");
2811       
2812   MemBarNode *b = parent_membar(barrier);
2813   assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
2814           "CAS not fed by cpuorder+release membar pair!");
2815 
2816   // does this lead a normal subgraph?
2817   MemBarNode *mbar = leading_to_normal(barrier);
2818 
2819   assert(mbar != NULL, "CAS not embedded in normal graph!");
2820 
2821   assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
2822 #endif // ASSERT
2823   // so we can just return true here
2824   return true;
2825 }
2826 
// predicate controlling translation of StoreCM
//
// returns true if the StoreStore barrier before the card write can be
// omitted, otherwise false
2831 
2832 bool unnecessary_storestore(const Node *storecm)
2833 {
2834   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2835 
2836   // we only ever need to generate a dmb ishst between an object put
2837   // and the associated card mark when we are using CMS without
2838   // conditional card marking
2839 
2840   if (!UseConcMarkSweepGC || UseCondCardMark) {
2841     return true;
2842   }
2843 
2844   // if we are implementing volatile puts using barriers then the
2845   // object put as an str so we must insert the dmb ishst
2846 
2847   if (UseBarriersForVolatile) {
2848     return false;
2849   }
2850 
2851   // we can omit the dmb ishst if this StoreCM is part of a volatile
2852   // put because in thta case the put will be implemented by stlr
2853   //
2854   // we need to check for a normal subgraph feeding this StoreCM.
2855   // that means the StoreCM must be fed Memory from a leading membar,
2856   // either a MemBarRelease or its dependent MemBarCPUOrder, and the
2857   // leading membar must be part of a normal subgraph
2858 
2859   Node *x = storecm->in(StoreNode::Memory);
2860 
2861   if (!x->is_Proj()) {
2862     return false;
2863   }
2864 
2865   x = x->in(0);
2866 
2867   if (!x->is_MemBar()) {
2868     return false;
2869   }
2870 
2871   MemBarNode *leading = x->as_MemBar();
2872 
2873   // reject invalid candidates
2874   if (!leading_membar(leading)) {
2875     return false;
2876   }
2877 
2878   // we can omit the StoreStore if it is the head of a normal subgraph
2879   return (leading_to_normal(leading) != NULL);
2880 }
2881 
2882 
2883 #define __ _masm.
2884 
// forward declarations for helper functions to convert register
// indices to register objects
2887 
2888 // the ad file has to provide implementations of certain methods
2889 // expected by the generic code
2890 //
2891 // REQUIRED FUNCTIONALITY
2892 
2893 //=============================================================================
2894 
2895 // !!!!! Special hack to get all types of calls to specify the byte offset
2896 //       from the start of the call to the point where the return address
2897 //       will point.
2898 
2899 int MachCallStaticJavaNode::ret_addr_offset()
2900 {
2901   // call should be a simple bl
2902   int off = 4;
2903   return off;
2904 }
2905 
2906 int MachCallDynamicJavaNode::ret_addr_offset()
2907 {
2908   return 16; // movz, movk, movk, bl
2909 }
2910 
2911 int MachCallRuntimeNode::ret_addr_offset() {
2912   // for generated stubs the call will be
2913   //   far_call(addr)
2914   // for real runtime callouts it will be six instructions
2915   // see aarch64_enc_java_to_runtime
2916   //   adr(rscratch2, retaddr)
2917   //   lea(rscratch1, RuntimeAddress(addr)
2918   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2919   //   blrt rscratch1
2920   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2921   if (cb) {
2922     return MacroAssembler::far_branch_size();
2923   } else {
2924     return 6 * NativeInstruction::instruction_size;
2925   }
2926 }
2927 
2928 // Indicate if the safepoint node needs the polling page as an input
2929 
// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.
2936 
// Report that the safepoint node takes the polling page address as an
// explicit input, so the mov of the address is scheduled as a prior
// instruction and the oop map data lands on the poll load itself (see
// the comment above).
bool SafePointNode::needs_polling_address_input()
{
  return true;
}
2941 
2942 //=============================================================================
2943 
2944 #ifndef PRODUCT
// Debug listing for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
2948 #endif
2949 
// Emit a breakpoint as a single brk instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}
2954 
// Size of the breakpoint node, computed generically from the emitted
// code.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
2958 
2959 //=============================================================================
2960 
2961 #ifndef PRODUCT
  // Debug listing for a nop pad (prints _count; n.b. labelled as
  // bytes in the output text).
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
2965 #endif
2966 
2967   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2968     MacroAssembler _masm(&cbuf);
2969     for (int i = 0; i < _count; i++) {
2970       __ nop();
2971     }
2972   }
2973 
  // Total size of the pad: one 4 byte instruction per nop.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2977 
2978 //=============================================================================
2979 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2980 
// Constants are addressed absolutely on AArch64, so the constant
// table base needs no offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
2984 
// No post-register-allocation expansion is needed for the constant
// table base node on AArch64.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called because requires_postalloc_expand() returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
2989 
// The constant table base materializes no code on AArch64.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
2993 
// Matches the empty encoding -- zero bytes emitted.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
2997 
2998 #ifndef PRODUCT
// Debug listing for the (empty) constant table base node.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
3002 #endif
3003 
3004 #ifndef PRODUCT
// Debug listing of the prolog: an optional stack bang note followed
// by the frame build sequence (small frames use an immediate sub,
// large frames route the adjustment through rscratch1).
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frame: allocate it, then store rfp/lr at its top
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    // large frame: push rfp/lr first, then drop sp via rscratch1
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
3024 #endif
3025 
// Emit the method prolog: a patchable nop, an optional stack bang,
// the frame build, simulator notification and constant table setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack before building the frame when required
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // the frame is fully built at this point
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3061 
// Prolog size depends on bang/frame configuration, so compute it
// generically from the emitted code.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}
3067 
// The prolog contains no relocatable values.
int MachPrologNode::reloc() const
{
  return 0;
}
3072 
3073 //=============================================================================
3074 
3075 #ifndef PRODUCT
// Debug listing of the epilog: frame teardown (three size-dependent
// shapes) and, for method compilations, the return polling page
// touch.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    // nothing but the saved pair to pop
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: reload the pair then release with an immediate add
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // large frame: adjust sp via rscratch1 before popping the pair
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
3099 #endif
3100 
// Emit the epilog: tear down the frame built by the prolog, notify
// the simulator and, for method compilations, read the polling page
// so a pending safepoint is taken on return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3116 
// Epilog size varies with the frame shape and polling.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}
3121 
// Relocation count for the epilog.
int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}
3126 
// Use the generic pipeline class for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
3130 
3131 // This method seems to be obsolete. It is declared in machnode.hpp
3132 // and defined in all *.ad files, but it is never called. Should we
3133 // get rid of it?
// Offset of the safepoint poll within the epilog (apparently unused
// -- see the note above about this method being obsolete).
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3138 
3139 //=============================================================================
3140 
// Register classes used by the spill copy logic below: rc_bad marks
// an unassigned register; rc_int, rc_float and rc_stack select
// general purpose registers, floating point registers and stack
// slots respectively.
enum RC { rc_bad, rc_int, rc_float, rc_stack };
3144 
// Map an OptoReg::Name to its register class. The OptoReg numbering
// here lays out 30 gpr halves first, then 32 fpr halves, then the
// flags register(s), then stack slots.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float register * 2 halves
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
3168 
3169 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3170   Compile* C = ra_->C;
3171 
3172   // Get registers to move.
3173   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3174   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3175   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3176   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3177 
3178   enum RC src_hi_rc = rc_class(src_hi);
3179   enum RC src_lo_rc = rc_class(src_lo);
3180   enum RC dst_hi_rc = rc_class(dst_hi);
3181   enum RC dst_lo_rc = rc_class(dst_lo);
3182 
3183   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3184 
3185   if (src_hi != OptoReg::Bad) {
3186     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3187            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3188            "expected aligned-adjacent pairs");
3189   }
3190 
3191   if (src_lo == dst_lo && src_hi == dst_hi) {
3192     return 0;            // Self copy, no move.
3193   }
3194 
3195   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3196               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3197   int src_offset = ra_->reg2offset(src_lo);
3198   int dst_offset = ra_->reg2offset(dst_lo);
3199 
3200   if (bottom_type()->isa_vect() != NULL) {
3201     uint ireg = ideal_reg();
3202     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3203     if (cbuf) {
3204       MacroAssembler _masm(cbuf);
3205       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3206       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3207         // stack->stack
3208         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3209         if (ireg == Op_VecD) {
3210           __ unspill(rscratch1, true, src_offset);
3211           __ spill(rscratch1, true, dst_offset);
3212         } else {
3213           __ spill_copy128(src_offset, dst_offset);
3214         }
3215       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3216         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3217                ireg == Op_VecD ? __ T8B : __ T16B,
3218                as_FloatRegister(Matcher::_regEncode[src_lo]));
3219       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3220         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3221                        ireg == Op_VecD ? __ D : __ Q,
3222                        ra_->reg2offset(dst_lo));
3223       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3224         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3225                        ireg == Op_VecD ? __ D : __ Q,
3226                        ra_->reg2offset(src_lo));
3227       } else {
3228         ShouldNotReachHere();
3229       }
3230     }
3231   } else if (cbuf) {
3232     MacroAssembler _masm(cbuf);
3233     switch (src_lo_rc) {
3234     case rc_int:
3235       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3236         if (is64) {
3237             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3238                    as_Register(Matcher::_regEncode[src_lo]));
3239         } else {
3240             MacroAssembler _masm(cbuf);
3241             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3242                     as_Register(Matcher::_regEncode[src_lo]));
3243         }
3244       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3245         if (is64) {
3246             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3247                      as_Register(Matcher::_regEncode[src_lo]));
3248         } else {
3249             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3250                      as_Register(Matcher::_regEncode[src_lo]));
3251         }
3252       } else {                    // gpr --> stack spill
3253         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3254         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3255       }
3256       break;
3257     case rc_float:
3258       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3259         if (is64) {
3260             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3261                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3262         } else {
3263             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3264                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3265         }
3266       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3267           if (cbuf) {
3268             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3269                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3270         } else {
3271             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3272                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3273         }
3274       } else {                    // fpr --> stack spill
3275         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3276         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3277                  is64 ? __ D : __ S, dst_offset);
3278       }
3279       break;
3280     case rc_stack:
3281       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3282         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3283       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3284         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3285                    is64 ? __ D : __ S, src_offset);
3286       } else {                    // stack --> stack copy
3287         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3288         __ unspill(rscratch1, is64, src_offset);
3289         __ spill(rscratch1, is64, dst_offset);
3290       }
3291       break;
3292     default:
3293       assert(false, "bad rc_class for spill");
3294       ShouldNotReachHere();
3295     }
3296   }
3297 
3298   if (st) {
3299     st->print("spill ");
3300     if (src_lo_rc == rc_stack) {
3301       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3302     } else {
3303       st->print("%s -> ", Matcher::regName[src_lo]);
3304     }
3305     if (dst_lo_rc == rc_stack) {
3306       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3307     } else {
3308       st->print("%s", Matcher::regName[dst_lo]);
3309     }
3310     if (bottom_type()->isa_vect() != NULL) {
3311       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3312     } else {
3313       st->print("\t# spill size = %d", is64 ? 64:32);
3314     }
3315   }
3316 
3317   return 0;
3318 
3319 }
3320 
3321 #ifndef PRODUCT
3322 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3323   if (!ra_)
3324     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3325   else
3326     implementation(NULL, ra_, false, st);
3327 }
3328 #endif
3329 
// Emit the spill copy instructions into cbuf.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
3333 
// Size is computed generically from the emitted instructions.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
3337 
3338 //=============================================================================
3339 
3340 #ifndef PRODUCT
3341 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3342   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3343   int reg = ra_->get_reg_first(this);
3344   st->print("add %s, rsp, #%d]\t# box lock",
3345             Matcher::regName[reg], offset);
3346 }
3347 #endif
3348 
3349 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3350   MacroAssembler _masm(&cbuf);
3351 
3352   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3353   int reg    = ra_->get_encode(this);
3354 
3355   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3356     __ add(as_Register(reg), sp, offset);
3357   } else {
3358     ShouldNotReachHere();
3359   }
3360 }
3361 
// Fixed size: the emit above produces exactly one 4 byte add.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
3366 
3367 //=============================================================================
3368 
3369 #ifndef PRODUCT
// Debug listing for the unverified entry point: load the receiver
// klass, compare it against the inline cache and branch to the ic
// miss stub on mismatch.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
3384 #endif
3385 
// Emit the unverified entry point: compare the receiver klass against
// the inline cache and jump to the ic miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3399 
// Size of the inline cache check, computed from the emitted code.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
3404 
3405 // REQUIRED EMIT CODE
3406 
3407 //=============================================================================
3408 
3409 // Emit exception handler code.
3410 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3411 {
3412   // mov rscratch1 #exception_blob_entry_point
3413   // br rscratch1
3414   // Note that the code buffer's insts_mark is always relative to insts.
3415   // That's why we must use the macroassembler to generate a handler.
3416   MacroAssembler _masm(&cbuf);
3417   address base = __ start_a_stub(size_exception_handler());
3418   if (base == NULL) {
3419     ciEnv::current()->record_failure("CodeCache is full");
3420     return 0;  // CodeBuffer::expand failed
3421   }
3422   int offset = __ offset();
3423   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3424   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3425   __ end_a_stub();
3426   return offset;
3427 }
3428 
// Emit deopt handler code.
//
// Generates a stub that records the current pc in lr and jumps to the
// deoptimization blob's unpack entry.  Returns the offset of the
// handler inside the stub section, or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // lr := address of the adr instruction itself, so the unpack code
  // can identify where deoptimization was requested from.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3449 
3450 // REQUIRED MATCHER CODE
3451 
3452 //=============================================================================
3453 
3454 const bool Matcher::match_rule_supported(int opcode) {
3455 
3456   // TODO
3457   // identify extra cases that we might want to provide match rules for
3458   // e.g. Op_StrEquals and other intrinsics
3459   if (!has_match_rule(opcode)) {
3460     return false;
3461   }
3462 
3463   return true;  // Per default match rules are supported.
3464 }
3465 
// Register-pressure threshold for float registers; AArch64 simply
// keeps the platform-independent default.
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}
3469 
// Not expected to be called on AArch64; aborts if reached.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}

// Not expected to be called on AArch64 (no short-branch optimization
// here); aborts if reached.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
{
  Unimplemented();
  return false;
}
3481 
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3492 
3493 // Vector width in bytes.
3494 const int Matcher::vector_width_in_bytes(BasicType bt) {
3495   int size = MIN2(16,(int)MaxVectorSize);
3496   // Minimum 2 values in vector
3497   if (size < 2*type2aelembytes(bt)) size = 0;
3498   // But never < 4
3499   if (size < 4) size = 0;
3500   return size;
3501 }
3502 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count = supported byte width / element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
3507 const int Matcher::min_vector_size(const BasicType bt) {
3508 //  For the moment limit the vector size to 8 bytes
3509     int size = 8 / type2aelembytes(bt);
3510     if (size < 2) size = 2;
3511     return size;
3512 }
3513 
3514 // Vector ideal reg.
3515 const int Matcher::vector_ideal_reg(int len) {
3516   switch(len) {
3517     case  8: return Op_VecD;
3518     case 16: return Op_VecX;
3519   }
3520   ShouldNotReachHere();
3521   return 0;
3522 }
3523 
// Vector shift counts always live in a full Q register, regardless of
// the operand vector's size.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
3527 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
3532 
// Does this platform support misaligned vector store/load?
// (Comment previously referred to x86; this is the AArch64 port.)
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3537 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray (in bytes).
const int Matcher::init_array_short_size = 18 * BytesPerLong;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 variable shifts use only the low bits of the count register.
const bool Matcher::need_masked_shift_count = false;
3566 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex narrow-oop addressing is only usable when no shift is
  // required to decode the compressed oop.
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
3586 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.  AArch64 follows the RISC choice here.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
3599 
// Platform hook for implicit-null-check fixups; not expected to be
// reached on AArch64.  (Comment previously said "No-op on amd64",
// copied from the x86 port.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3604 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.  Not needed on AArch64.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3618 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
//
// The Java argument registers are r0-r7 and v0-v7; each is listed
// together with its _H (high half) companion.
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
3644 
// An argument register is spillable exactly when it is a Java
// argument register.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// No assembler fast path for long division by constant on AArch64.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.
// divmodI/divmodL are not used on AArch64, so none of the projection
// masks below should ever be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in the frame pointer register across a method-handle
// invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3680 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Walk the argument types, counting floating-point and general args.
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): there is no break here, so T_FLOAT/T_DOUBLE fall
      // through and are counted in gps as well -- gps ends up as the
      // total argument count rather than the integral-only count.
      // Confirm this is what the simulator blrt convention expects.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return type.  Note the default (integral) case sits
  // between T_VOID and the floating-point cases; each case breaks, so
  // the unusual ordering is harmless.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3722 
// Emit a volatile access: apply INSN (a load-acquire/store-release
// instruction) to REG at [BASE].  Only the plain base-register
// addressing mode is legal for these instructions, hence the
// guarantees.  SCRATCH is currently unused.  Note that _masm is
// deliberately declared OUTSIDE the braces so that enc_classes using
// this macro can keep emitting instructions via __ afterwards.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3731 
// Pointer-to-member types naming the MacroAssembler emitters used by
// the loadStore() helpers below: integer, float, and SIMD variants.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3736 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // Note: masm is passed by value (a fresh MacroAssembler wrapper
  // around the code buffer), and rscratch1 is clobbered when both a
  // displacement and an index are present.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      // int-to-long index conversion: sign-extend the 32-bit index.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Cannot encode base+disp+index in one address: fold base+disp
        // into rscratch1 first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3773 
  // Float/double variant of loadStore(); same addressing-mode logic
  // as the integer version above, with a FloatRegister operand.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      // int-to-long index conversion: sign-extend the 32-bit index.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Fold base+disp into rscratch1; clobbers rscratch1.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3802 
3803   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3804                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3805                          int opcode, Register base, int index, int size, int disp)
3806   {
3807     if (index == -1) {
3808       (masm.*insn)(reg, T, Address(base, disp));
3809     } else {
3810       assert(disp == 0, "unsupported address mode");
3811       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3812     }
3813   }
3814 
3815 %}
3816 
3817 
3818 
3819 //----------ENCODING BLOCK-----------------------------------------------------
3820 // This block specifies the encoding classes used by the compiler to
3821 // output byte streams.  Encoding classes are parameterized macros
3822 // used by Machine Instruction Nodes in order to generate the bit
3823 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
3827 // which returns its register number when queried.  CONST_INTER causes
3828 // an operand to generate a function which returns the value of the
3829 // constant when queried.  MEMORY_INTER causes an operand to generate
3830 // four functions which return the Base Register, the Index Register,
3831 // the Scale Value, and the Offset Value of the operand when queried.
3832 // COND_INTER causes an operand to generate six functions which return
3833 // the encoding code (ie - encoding bits for the instruction)
3834 // associated with each basic boolean condition for a conditional
3835 // instruction.
3836 //
3837 // Instructions specify two basic values for encoding.  Again, a
3838 // function is available to check if the constant displacement is an
3839 // oop. They use the ins_encode keyword to specify their encoding
3840 // classes (which must be a sequence of enc_class names, and their
3841 // parameters, specified in the encoding block), and they use the
3842 // opcode keyword to specify, in order, their primary, secondary, and
3843 // tertiary opcode.  Only the opcode sections which a particular
3844 // instruction needs for encoding need to be specified.
3845 encode %{
3846   // Build emit functions for each basic byte or larger field in the
3847   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3848   // from C++ code in the enc_class source block.  Emit functions will
3849   // live in the main source block for now.  In future, we can
3850   // generalize this by adding a syntax that specifies the sizes of
3851   // fields in an order, so that the adlc can build the emit functions
3852   // automagically
3853 
  // catch all for unimplemented encodings: reports at run time if an
  // instruction with this encoding is ever executed.
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3859 
  // BEGIN Non-volatile memory access

  // Integer loads.  Each expands to a single load instruction emitted
  // via loadStore(), which derives the addressing mode (and any sign
  // extension of the index) from the memory operand's opcode.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3933 
  // Float, double and vector loads.  The vector variants pass a SIMD
  // register-size qualifier (S/D/Q) to the vector loadStore() helper.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3963 
  // Integer stores.  The *0 variants store the zero register (zr)
  // instead of taking a source operand.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // As strb0, but preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (the encoding r31 means zr in this context), so copy sp
    // through rscratch2 first.
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
4026 
  // Float, double and vector stores.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
4058 
  // volatile loads and stores

  // Store-release byte/half/word; base-register addressing only
  // (enforced by MOV_VOLATILE's guarantees).
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
4075 
4076 
  // Load-acquire variants.  AArch64 has no sign-extending
  // load-acquire, so the signed forms do a zero-extending ldarb/ldarh
  // followed by an explicit sign extension.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
4139 
  // Volatile FP loads: load-acquire into rscratch1, then move the
  // bits into the destination FP register with fmov.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
4151 
  // Store-release of a long.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (r31 encodes zr here), so copy sp through rscratch2 first.
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile FP stores: move the FP bits into rscratch2, then
  // store-release from the general register.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4185 
  // synchronized read/update encodings

  // Load-acquire-exclusive.  ldaxr only takes a base register, so any
  // displacement/index is folded into rscratch1 with lea first.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base+disp first, then add the scaled index.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4216 
  // Store-release-exclusive.  stlxr writes its success status into
  // rscratch1 (0 = success); the final cmpw publishes that status in
  // the condition flags.  Address computation mirrors ldaxr above,
  // using rscratch2 as the address scratch.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4246 
  // 64-bit compare-and-swap at [mem].  Only a bare base register is
  // supported; the guarantee enforces that the matcher gave us no
  // index or displacement.  Uses plain ldxr (no acquire on the load).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}
4253 
  // 32-bit variant of aarch64_enc_cmpxchg (ldxrw/cmpw/stlxrw).
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}
4260 
4261 
4262   // The only difference between aarch64_enc_cmpxchg and
4263   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4264   // CompareAndSwap sequence to serve as a barrier on acquiring a
4265   // lock.
  // 64-bit CAS with load-acquire (ldaxr) — see the comment above:
  // identical to aarch64_enc_cmpxchg except that the acquiring load
  // serves as a barrier when taking a lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}
4272 
  // 32-bit CAS with load-acquire (ldaxrw); see aarch64_enc_cmpxchg_acq.
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}
4279 
4280 
4281   // auxiliary used for CompareAndSwapX to set result register
4282   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4283     MacroAssembler _masm(&cbuf);
4284     Register res_reg = as_Register($res$$reg);
4285     __ cset(res_reg, Assembler::EQ);
4286   %}
4287 
4288   // prefetch encodings
4289 
  // Prefetch-for-store (PSTL1KEEP) at the matched memory operand.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
      // NOTE(review): the purpose of this nop is not evident from the
      // code — possibly size padding relative to the other arms; confirm.
      __ nop();
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // Fold the displacement into rscratch1 first; prfm's address
        // form cannot combine disp and a scaled index.
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4309 
  // Zero a word-aligned region of cnt words starting at base, using a
  // Duff's-device-style computed jump into an 8-way unrolled loop of
  // str(zr) stores.  Clobbers cnt_reg, base_reg, rscratch1, rscratch2.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Jump backwards from 'entry' by one 4-byte instruction per
    // remainder word, so exactly (cnt % unroll) stores run first.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    // Unrolled stores at negative offsets from the (advanced) base.
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
4358 
  /// mov encodings
4360 
4361   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4362     MacroAssembler _masm(&cbuf);
4363     u_int32_t con = (u_int32_t)$src$$constant;
4364     Register dst_reg = as_Register($dst$$reg);
4365     if (con == 0) {
4366       __ movw(dst_reg, zr);
4367     } else {
4368       __ movw(dst_reg, con);
4369     }
4370   %}
4371 
4372   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4373     MacroAssembler _masm(&cbuf);
4374     Register dst_reg = as_Register($dst$$reg);
4375     u_int64_t con = (u_int64_t)$src$$constant;
4376     if (con == 0) {
4377       __ mov(dst_reg, zr);
4378     } else {
4379       __ mov(dst_reg, con);
4380     }
4381   %}
4382 
  // Materialize a pointer constant, dispatching on its relocation type:
  // oops and metadata get relocated moves; plain addresses below
  // os::vm_page_size() are moved as small immediates, anything else is
  // built with adrp+add.  NULL and 1 have dedicated encodings elsewhere
  // (mov_p0 / mov_p1), hence the ShouldNotReachHere.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          // Page-relative materialization: adrp gives the 4K page,
          // the add supplies the low bits.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4407 
4408   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4409     MacroAssembler _masm(&cbuf);
4410     Register dst_reg = as_Register($dst$$reg);
4411     __ mov(dst_reg, zr);
4412   %}
4413 
4414   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4415     MacroAssembler _masm(&cbuf);
4416     Register dst_reg = as_Register($dst$$reg);
4417     __ mov(dst_reg, (u_int64_t)1);
4418   %}
4419 
4420   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
4421     MacroAssembler _masm(&cbuf);
4422     address page = (address)$src$$constant;
4423     Register dst_reg = as_Register($dst$$reg);
4424     unsigned long off;
4425     __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
4426     assert(off == 0, "assumed offset == 0");
4427   %}
4428 
4429   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
4430     MacroAssembler _masm(&cbuf);
4431     address page = (address)$src$$constant;
4432     Register dst_reg = as_Register($dst$$reg);
4433     unsigned long off;
4434     __ adrp(dst_reg, ExternalAddress(page), off);
4435     assert(off == 0, "assumed offset == 0");
4436   %}
4437 
4438   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
4439     MacroAssembler _masm(&cbuf);
4440     Register dst_reg = as_Register($dst$$reg);
4441     address con = (address)$src$$constant;
4442     if (con == NULL) {
4443       ShouldNotReachHere();
4444     } else {
4445       relocInfo::relocType rtype = $src->constant_reloc();
4446       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
4447       __ set_narrow_oop(dst_reg, (jobject)con);
4448     }
4449   %}
4450 
4451   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4452     MacroAssembler _masm(&cbuf);
4453     Register dst_reg = as_Register($dst$$reg);
4454     __ mov(dst_reg, zr);
4455   %}
4456 
4457   enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
4458     MacroAssembler _masm(&cbuf);
4459     Register dst_reg = as_Register($dst$$reg);
4460     address con = (address)$src$$constant;
4461     if (con == NULL) {
4462       ShouldNotReachHere();
4463     } else {
4464       relocInfo::relocType rtype = $src->constant_reloc();
4465       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
4466       __ set_narrow_klass(dst_reg, (Klass *)con);
4467     }
4468   %}
4469 
4470   // arithmetic encodings
4471 
4472   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4473     MacroAssembler _masm(&cbuf);
4474     Register dst_reg = as_Register($dst$$reg);
4475     Register src_reg = as_Register($src1$$reg);
4476     int32_t con = (int32_t)$src2$$constant;
4477     // add has primary == 0, subtract has primary == 1
4478     if ($primary) { con = -con; }
4479     if (con < 0) {
4480       __ subw(dst_reg, src_reg, -con);
4481     } else {
4482       __ addw(dst_reg, src_reg, con);
4483     }
4484   %}
4485 
4486   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4487     MacroAssembler _masm(&cbuf);
4488     Register dst_reg = as_Register($dst$$reg);
4489     Register src_reg = as_Register($src1$$reg);
4490     int32_t con = (int32_t)$src2$$constant;
4491     // add has primary == 0, subtract has primary == 1
4492     if ($primary) { con = -con; }
4493     if (con < 0) {
4494       __ sub(dst_reg, src_reg, -con);
4495     } else {
4496       __ add(dst_reg, src_reg, con);
4497     }
4498   %}
4499 
4500   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4501     MacroAssembler _masm(&cbuf);
4502    Register dst_reg = as_Register($dst$$reg);
4503    Register src1_reg = as_Register($src1$$reg);
4504    Register src2_reg = as_Register($src2$$reg);
4505     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4506   %}
4507 
4508   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4509     MacroAssembler _masm(&cbuf);
4510    Register dst_reg = as_Register($dst$$reg);
4511    Register src1_reg = as_Register($src1$$reg);
4512    Register src2_reg = as_Register($src2$$reg);
4513     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4514   %}
4515 
4516   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4517     MacroAssembler _masm(&cbuf);
4518    Register dst_reg = as_Register($dst$$reg);
4519    Register src1_reg = as_Register($src1$$reg);
4520    Register src2_reg = as_Register($src2$$reg);
4521     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4522   %}
4523 
4524   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4525     MacroAssembler _masm(&cbuf);
4526    Register dst_reg = as_Register($dst$$reg);
4527    Register src1_reg = as_Register($src1$$reg);
4528    Register src2_reg = as_Register($src2$$reg);
4529     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4530   %}
4531 
4532   // compare instruction encodings
4533 
4534   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4535     MacroAssembler _masm(&cbuf);
4536     Register reg1 = as_Register($src1$$reg);
4537     Register reg2 = as_Register($src2$$reg);
4538     __ cmpw(reg1, reg2);
4539   %}
4540 
4541   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4542     MacroAssembler _masm(&cbuf);
4543     Register reg = as_Register($src1$$reg);
4544     int32_t val = $src2$$constant;
4545     if (val >= 0) {
4546       __ subsw(zr, reg, val);
4547     } else {
4548       __ addsw(zr, reg, -val);
4549     }
4550   %}
4551 
4552   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4553     MacroAssembler _masm(&cbuf);
4554     Register reg1 = as_Register($src1$$reg);
4555     u_int32_t val = (u_int32_t)$src2$$constant;
4556     __ movw(rscratch1, val);
4557     __ cmpw(reg1, rscratch1);
4558   %}
4559 
4560   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4561     MacroAssembler _masm(&cbuf);
4562     Register reg1 = as_Register($src1$$reg);
4563     Register reg2 = as_Register($src2$$reg);
4564     __ cmp(reg1, reg2);
4565   %}
4566 
  // 64-bit compare against a 12-bit add/sub immediate: subtract (or add
  // the negation) into the zero register so only the flags survive.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case: it is its own negation,
    // so materialize it in rscratch1 and do a register compare.
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4581 
4582   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4583     MacroAssembler _masm(&cbuf);
4584     Register reg1 = as_Register($src1$$reg);
4585     u_int64_t val = (u_int64_t)$src2$$constant;
4586     __ mov(rscratch1, val);
4587     __ cmp(reg1, rscratch1);
4588   %}
4589 
4590   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4591     MacroAssembler _masm(&cbuf);
4592     Register reg1 = as_Register($src1$$reg);
4593     Register reg2 = as_Register($src2$$reg);
4594     __ cmp(reg1, reg2);
4595   %}
4596 
4597   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4598     MacroAssembler _masm(&cbuf);
4599     Register reg1 = as_Register($src1$$reg);
4600     Register reg2 = as_Register($src2$$reg);
4601     __ cmpw(reg1, reg2);
4602   %}
4603 
4604   enc_class aarch64_enc_testp(iRegP src) %{
4605     MacroAssembler _masm(&cbuf);
4606     Register reg = as_Register($src$$reg);
4607     __ cmp(reg, zr);
4608   %}
4609 
4610   enc_class aarch64_enc_testn(iRegN src) %{
4611     MacroAssembler _masm(&cbuf);
4612     Register reg = as_Register($src$$reg);
4613     __ cmpw(reg, zr);
4614   %}
4615 
4616   enc_class aarch64_enc_b(label lbl) %{
4617     MacroAssembler _masm(&cbuf);
4618     Label *L = $lbl$$label;
4619     __ b(*L);
4620   %}
4621 
4622   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4623     MacroAssembler _masm(&cbuf);
4624     Label *L = $lbl$$label;
4625     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4626   %}
4627 
4628   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4629     MacroAssembler _masm(&cbuf);
4630     Label *L = $lbl$$label;
4631     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4632   %}
4633 
  // Out-of-line portion of a subtype check; delegates to
  // MacroAssembler::check_klass_subtype_slow_path.  On the fall-through
  // (non-miss) path, $primary additionally zeroes the result register
  // before the miss label is bound.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4651 
  // Static/special Java call.  Chooses the relocation type from the
  // call-site kind, and for real Java targets also emits the
  // to-interpreter stub.  On code-cache exhaustion the compile is
  // failed rather than emitting a broken call.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }
    if (call == NULL) {
      // Trampoline allocation failed: out of code-cache space.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    if (_method) {
      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}
4679 
4680   enc_class aarch64_enc_java_dynamic_call(method meth) %{
4681     MacroAssembler _masm(&cbuf);
4682     address call = __ ic_call((address)$meth$$method);
4683     if (call == NULL) {
4684       ciEnv::current()->record_failure("CodeCache is full"); 
4685       return;
4686     }
4687   %}
4688 
  // Epilogue after a Java call.  Only hooks stack-depth verification,
  // which is not implemented on AArch64 (call_Unimplemented traps).
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4696 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the code cache: a trampoline call reaches it.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Native target: classify the signature so blrt knows the live
      // gp/fp argument registers and return kind.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4727 
  // Jump to the shared rethrow stub; far_jump because the stub may be
  // outside direct-branch range.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4732 
  // Return to the caller via the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4737 
4738   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4739     MacroAssembler _masm(&cbuf);
4740     Register target_reg = as_Register($jump_target$$reg);
4741     __ br(target_reg);
4742   %}
4743 
4744   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4745     MacroAssembler _masm(&cbuf);
4746     Register target_reg = as_Register($jump_target$$reg);
4747     // exception oop should be in r0
4748     // ret addr has been popped into lr
4749     // callee expects it in r3
4750     __ mov(r3, lr);
4751     __ br(target_reg);
4752   %}
4753 
  // Inline fast path of monitorenter.  On exit, flag EQ means the lock
  // was acquired; flag NE sends the caller to the runtime slow path.
  // Clobbers disp_hdr ($tmp), tmp ($tmp2), rscratch1 and rscratch2.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this sets NE and forces the slow path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    {
      // Hand-rolled CAS: acquire-load the mark, bail to cas_failed if
      // it is not the expected unlocked value, else try to install box.
      Label retry_load;
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // Status 0 = success; flags are still EQ from the cmp above.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        // CAS loop on m->owner; flags from the cmp (EQ = we own it now,
        // NE = some other owner) are the encoding's result.
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4895 
4896   // TODO
4897   // reimplement this with custom cmpxchgptr code
4898   // which avoids some of the unnecessary branching
  // Inline fast path of monitorexit.  On exit, flag EQ means the lock
  // was released; flag NE sends the caller to the runtime slow path.
  // Clobbers disp_hdr ($tmp), tmp ($tmp2) and rscratch1.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      // NOTE(review): the markOop is loaded into tmp but the monitor
      // bit is tested on disp_hdr (the displaced header from the box)
      // — verify this is intentional; later code uses tmp as the mark.
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        // Hand-rolled CAS: restore the displaced header into the mark
        // word iff the mark still points at our box.
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // Status 0 = success; flags are still EQ from the cmp above.
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      // Not the owner, or recursions outstanding: NE -> slow path.
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      // Waiters present: leave with the flags set by the cmp above.
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4988 
4989 %}
4990 
4991 //----------FRAME--------------------------------------------------------------
4992 // Definition of frame structure and management information.
4993 //
4994 //  S T A C K   L A Y O U T    Allocators stack-slot number
4995 //                             |   (to get allocators register number
4996 //  G  Owned by    |        |  v    add OptoReg::stack0())
4997 //  r   CALLER     |        |
4998 //  o     |        +--------+      pad to even-align allocators stack-slot
4999 //  w     V        |  pad0  |        numbers; owned by CALLER
5000 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5001 //  h     ^        |   in   |  5
5002 //        |        |  args  |  4   Holes in incoming args owned by SELF
5003 //  |     |        |        |  3
5004 //  |     |        +--------+
5005 //  V     |        | old out|      Empty on Intel, window on Sparc
5006 //        |    old |preserve|      Must be even aligned.
5007 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5008 //        |        |   in   |  3   area for Intel ret address
5009 //     Owned by    |preserve|      Empty on Sparc.
5010 //       SELF      +--------+
5011 //        |        |  pad2  |  2   pad to align old SP
5012 //        |        +--------+  1
5013 //        |        | locks  |  0
5014 //        |        +--------+----> OptoReg::stack0(), even aligned
5015 //        |        |  pad1  | 11   pad to align new SP
5016 //        |        +--------+
5017 //        |        |        | 10
5018 //        |        | spills |  9   spills
5019 //        V        |        |  8   (pad0 slot for callee)
5020 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5021 //        ^        |  out   |  7
5022 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5023 //     Owned by    +--------+
5024 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5025 //        |    new |preserve|      Must be even-aligned.
5026 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5027 //        |        |        |
5028 //
5029 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5030 //         known from SELF's arguments and the Java calling convention.
5031 //         Region 6-7 is determined per call site.
5032 // Note 2: If the calling convention leaves holes in the incoming argument
5033 //         area, those holes are owned by SELF.  Holes in the outgoing area
5034 //         are owned by the CALLEE.  Holes should not be nessecary in the
5035 //         incoming area, as the Java calling convention is completely under
5036 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5038 //         varargs C calling conventions.
5039 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5040 //         even aligned with pad0 as needed.
5041 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5042 //           (the latter is true on Intel but is it false on AArch64?)
5043 //         region 6-11 is even aligned; it may be padded out more so that
5044 //         the region from SP to FP meets the minimum stack alignment.
5045 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5046 //         alignment.  Region 11, pad1, may be dynamically extended so that
5047 //         SP meets the minimum alignment.
5048 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  // (2 x 32-bit VM slots, i.e. one 64-bit word per monitor;
  //  cf. the two-slots-per-word note on in_preserve_stack_slots below)
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing; just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing.
    // (void) cast: the returned out-arg slot count is not needed here.
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // Returns the OptoRegPair (hi, lo) holding a Java return value of
    // the given ideal register type; longs/pointers use R0/R0_H,
    // floats/doubles use V0(/V0_H).
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5152 
5153 //----------ATTRIBUTES---------------------------------------------------------
5154 //----------Operand Attributes-------------------------------------------------
// Default operand cost is 1; operand definitions below declare
// op_cost(0) explicitly to override this default.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5170 
5171 //----------OPERANDS-----------------------------------------------------------
5172 // Operand definitions must precede instruction definitions for correct parsing
5173 // in the ADLC because operands constitute user defined types which are used in
5174 // instruction definitions.
5175 
5176 //----------Simple Operands----------------------------------------------------
5177 
5178 // Integer operands 32 bit
5179 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer less than or equal to 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 255 (0xff)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer equal to 65535 (0xffff)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5331 
// NOTE(review): despite the immL_ prefix, the two operands below match
// 32-bit int constants (ConI / get_int()), not ConL.  Presumably they are
// used where a long instruction takes a count or mask supplied as an int
// constant -- confirm against their use sites before changing.

// int constant 63
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int constant 255 (0xff)
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5351 
// 64 bit integer equal to 65535 (0xffff)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer equal to 4294967295 (0xffffffff)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order set bits (2^k - 1) with the top
// two bits clear
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order set bits (2^k - 1) with the top
// two bits clear
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5393 
5394 // Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- long variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores (long)
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5490 
5491 // Integer operands 64 bit
5492 // 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant equal to the offset of last_Java_pc within the
// thread's frame anchor (JavaThread::frame_anchor_offset() +
// JavaFrameAnchor::last_Java_pc_offset())

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5577 
5578 // Pointer operands
5579 // Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): comment duplicated from immP_M1 above -- confirm the
// intended use of the -2 sentinel at the instruction definitions.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5659 
5660 // Float and Double operands
5661 // Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant representable in the FP immediate encoding
// (see Assembler::operand_valid_for_float_immediate)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant representable in the FP immediate encoding
// (see Assembler::operand_valid_for_float_immediate)
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5720 
5721 // Narrow pointer operands
5722 // Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass pointer immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5751 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  // also accept the more-restricted subset operand
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  // also accept the more-restricted subset operand
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5785 
5786 // Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  // Explicit zero operand cost, for consistency with iRegINoSp and
  // iRegPNoSp; without this the op_attrib default op_cost(1) applies.
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5794 
5795 // Pointer Register Operands
5796 // Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  // also accept the more-restricted subset operands
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
// NOTE(review): fp_reg presumably denotes the frame pointer register
// (r29, cf. interpreter_frame_pointer(R29) in the frame section) --
// confirm in the register definitions.
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5933 
5934 // Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5978 
5979 
5980 // Pointer Register Operands
5981 // Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow pointer 32 bit Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6012 
6013 // Float Register
6014 // Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand, D (64-bit) form
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register operand, X (128-bit) form
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6092 
6093 // Flags register, used as output of signed compare instructions
6094 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
6097 // that ordered inequality tests use GT, GE, LT or LE none of which
6098 // pass through cases where the result is unordered i.e. one or both
6099 // inputs to the compare is a NaN. this means that the ideal code can
6100 // replace e.g. a GT with an LE and not end up capturing the NaN case
6101 // (where the comparison should always fail). EQ and NE tests are
6102 // always generated in ideal code so that unordered folds into the NE
6103 // case, matching the behaviour of AArch64 NE.
6104 //
6105 // This differs from x86 where the outputs of FP compares use a
6106 // special FP flags registers and where compares based on this
6107 // register are distinguished into ordered inequalities (cmpOpUCF) and
6108 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6109 // to explicitly handle the unordered case in branches. x86 also has
6110 // to include extra CMoveX rules to accept a cmpOpUCF input.
6111 
// (also used as the output of FP compares -- see the discussion above)
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6132 
6133 // Special Registers
6134 
6135 // Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Method Oop Register for I2C adapters
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6174 
6175 //----------Memory Operands----------------------------------------------------
6176 
// Direct addressing: [reg] with no index and no displacement
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register (0xffffffff sentinel)
    scale(0x0);
    disp(0x0);
  %}
%}
6190 
// base + (long index << scale) + unsigned 12-bit int offset
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + (long index << scale) + unsigned 12-bit long offset
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + sign-extended int index + unsigned 12-bit long offset
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// base + (sign-extended int index << scale) + unsigned 12-bit long offset
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// base + (sign-extended int index << scale), no offset
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (long index << scale), no offset
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + long index, no scale, no offset
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + int offset valid for scaled/unscaled immediate addressing
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}

// base + long offset valid for scaled/unscaled immediate addressing
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
6316 
6317 
// The operands below mirror the plain-pointer forms above but take a
// narrow (compressed oop, iRegN) base via DecodeN. They are only
// selected when Universe::narrow_oop_shift() == 0 (see each predicate),
// so the compressed value can be used directly in address formation.

// Narrow-base memory operand: [reg] with no index or displacement.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + (long index << scale) + unsigned 12-bit int offset.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + (long index << scale) + unsigned 12-bit long offset.
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + sign-extended int index + unsigned 12-bit long offset.
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);  // index used unscaled
    disp($off);
  %}
%}

// Narrow base + (sign-extended int index << scale) + unsigned 12-bit long offset.
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + (sign-extended int index << scale), no displacement.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + (long index << scale), no displacement.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + long index, unscaled, no displacement.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + int immediate offset, no index register.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + long immediate offset, no index register.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6467 
6468 
6469 
// AArch64 opto stubs need to write to the pc slot in the thread anchor.
// Memory operand: thread register + fixed pc-slot offset (immL_pc_off),
// no index register.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6484 
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
//
// NOTE(review): the base(0x1e) / "RSP" comments below are inherited from
// the x86 AD file; 0x1e is presumably the stack-pointer encoding used by
// the matcher here — confirm against the register definition section.

// Stack slot holding a pointer; address is SP + allocated stack offset.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding an int.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a float.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a long.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6559 
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// used for signed integral comparisons and fp comparisons
// The hex values are the AArch64 condition-code encodings matching the
// mnemonic given alongside each (eq/ne/lt/ge/le/gt/vs/vc).

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons
// Same structure as cmpOp but mapping to the unsigned condition codes
// (lo/hs/ls/hi); overflow conditions are unchanged.

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6615 
// Special operand allowing long args to int ops to be truncated for free
// Matches (ConvL2I reg) as a plain register operand: the 32-bit consumer
// just reads the low half, so no truncation instruction is emitted.

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
6628 
// vmem: the restricted set of addressing forms (base, base+index,
// base+imm offset) — presumably those legal for vector memory accesses;
// confirm at the use sites of vmem.
opclass vmem(indirect, indIndex, indOffI, indOffL);

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
// It unions every addressing form defined above: the plain-pointer
// operands on the first line and their narrow-oop (N) twins on the second.

opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6659 
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.
// Integer ALU reg operation
pipeline %{

attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed-size (32-bit) instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6677 
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// INS01 and ALU are unions: an instruction requiring INS01 may issue in
// either issue slot, and one requiring ALU may use either ALU unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Four stages: issue, execute 1, execute 2, write-back.
pipe_desc(ISS, EX1, EX2, WR);
6697 
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read); // shifted operand needed early
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read); // shifted operand needed early
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
6799 
//------- Compare operation -------------------------------

// Compare reg-reg; writes only the flags register.
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate; writes only the flags register.
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
6826 
//------- Conditional instructions ------------------------

// Conditional no operands (reads flags only)
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 register operand (single source plus flags)
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
6864 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
6917 
//------- Divide pipeline operations --------------------

// Divide (32 bit)
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Divide (64 bit)
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
6943 
//------- Load pipeline operations ------------------------

// Load - prefetch: address consumed at issue, no destination register.
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg (register-formed address)
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
6977 
//------- Store pipeline operations -----------------------

// Store - zr, mem: stores the zero register, so no data source to model.
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem: address needed at issue, data not until EX2.
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg: dst here is the address register, src the data.
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7011 
//------- Branch pipeline operations ----------------------

// Unconditional branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch (reads flags)
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch (reads a register, not flags)
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7040 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7064 
// Empty pipeline class: zero-latency placeholder (used for MachNop below).
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}

%}
7106 //----------INSTRUCTIONS-------------------------------------------------------
7107 //
7108 // match      -- States which machine-independent subtree may be replaced
7109 //               by this instruction.
7110 // ins_cost   -- The estimated cost of this instruction is used by instruction
7111 //               selection to identify a minimum cost tree of machine
7112 //               instructions that matches a tree of machine-independent
7113 //               instructions.
7114 // format     -- A string providing the disassembly for this instruction.
7115 //               The value of an instruction's operand may be inserted
7116 //               by referring to it with a '$' prefix.
7117 // opcode     -- Three instruction opcodes may be provided.  These are referred
7118 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7120 //               indicate the type of machine instruction, while secondary
7121 //               and tertiary are often used for prefix options or addressing
7122 //               modes.
7123 // ins_encode -- A list of encode classes with parameters. The encode class
7124 //               name must have been defined in an 'enc_class' specification
7125 //               in the encode section of the architecture description.
7126 
7127 // ============================================================================
7128 // Memory (Load/Store) Instructions
7129 
7130 // Load Instructions
7131 
7132 // Load Byte (8 bit signed)
7133 instruct loadB(iRegINoSp dst, memory mem)
7134 %{
7135   match(Set dst (LoadB mem));
7136   predicate(!needs_acquiring_load(n));
7137 
7138   ins_cost(4 * INSN_COST);
7139   format %{ "ldrsbw  $dst, $mem\t# byte" %}
7140 
7141   ins_encode(aarch64_enc_ldrsbw(dst, mem));
7142 
7143   ins_pipe(iload_reg_mem);
7144 %}
7145 
7146 // Load Byte (8 bit signed) into long
7147 instruct loadB2L(iRegLNoSp dst, memory mem)
7148 %{
7149   match(Set dst (ConvI2L (LoadB mem)));
7150   predicate(!needs_acquiring_load(n->in(1)));
7151 
7152   ins_cost(4 * INSN_COST);
7153   format %{ "ldrsb  $dst, $mem\t# byte" %}
7154 
7155   ins_encode(aarch64_enc_ldrsb(dst, mem));
7156 
7157   ins_pipe(iload_reg_mem);
7158 %}
7159 
7160 // Load Byte (8 bit unsigned)
7161 instruct loadUB(iRegINoSp dst, memory mem)
7162 %{
7163   match(Set dst (LoadUB mem));
7164   predicate(!needs_acquiring_load(n));
7165 
7166   ins_cost(4 * INSN_COST);
7167   format %{ "ldrbw  $dst, $mem\t# byte" %}
7168 
7169   ins_encode(aarch64_enc_ldrb(dst, mem));
7170 
7171   ins_pipe(iload_reg_mem);
7172 %}
7173 
7174 // Load Byte (8 bit unsigned) into long
7175 instruct loadUB2L(iRegLNoSp dst, memory mem)
7176 %{
7177   match(Set dst (ConvI2L (LoadUB mem)));
7178   predicate(!needs_acquiring_load(n->in(1)));
7179 
7180   ins_cost(4 * INSN_COST);
7181   format %{ "ldrb  $dst, $mem\t# byte" %}
7182 
7183   ins_encode(aarch64_enc_ldrb(dst, mem));
7184 
7185   ins_pipe(iload_reg_mem);
7186 %}
7187 
7188 // Load Short (16 bit signed)
7189 instruct loadS(iRegINoSp dst, memory mem)
7190 %{
7191   match(Set dst (LoadS mem));
7192   predicate(!needs_acquiring_load(n));
7193 
7194   ins_cost(4 * INSN_COST);
7195   format %{ "ldrshw  $dst, $mem\t# short" %}
7196 
7197   ins_encode(aarch64_enc_ldrshw(dst, mem));
7198 
7199   ins_pipe(iload_reg_mem);
7200 %}
7201 
7202 // Load Short (16 bit signed) into long
7203 instruct loadS2L(iRegLNoSp dst, memory mem)
7204 %{
7205   match(Set dst (ConvI2L (LoadS mem)));
7206   predicate(!needs_acquiring_load(n->in(1)));
7207 
7208   ins_cost(4 * INSN_COST);
7209   format %{ "ldrsh  $dst, $mem\t# short" %}
7210 
7211   ins_encode(aarch64_enc_ldrsh(dst, mem));
7212 
7213   ins_pipe(iload_reg_mem);
7214 %}
7215 
7216 // Load Char (16 bit unsigned)
7217 instruct loadUS(iRegINoSp dst, memory mem)
7218 %{
7219   match(Set dst (LoadUS mem));
7220   predicate(!needs_acquiring_load(n));
7221 
7222   ins_cost(4 * INSN_COST);
7223   format %{ "ldrh  $dst, $mem\t# short" %}
7224 
7225   ins_encode(aarch64_enc_ldrh(dst, mem));
7226 
7227   ins_pipe(iload_reg_mem);
7228 %}
7229 
7230 // Load Short/Char (16 bit unsigned) into long
7231 instruct loadUS2L(iRegLNoSp dst, memory mem)
7232 %{
7233   match(Set dst (ConvI2L (LoadUS mem)));
7234   predicate(!needs_acquiring_load(n->in(1)));
7235 
7236   ins_cost(4 * INSN_COST);
7237   format %{ "ldrh  $dst, $mem\t# short" %}
7238 
7239   ins_encode(aarch64_enc_ldrh(dst, mem));
7240 
7241   ins_pipe(iload_reg_mem);
7242 %}
7243 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// Folds the ConvI2L using the sign-extending ldrsw.
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the low-32-bit mask turns the sign-extending ConvI2L into
// zero extension, which a plain ldrw provides for free.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7285 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  // Plain (non-acquiring) loads only; acquiring loads match elsewhere.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Disassembly comment fixed from "# int" to "# long": this rule performs
  // a 64-bit load (cf. loadI/loadUI2L, which correctly emit "# int").
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7299 
// Load Range (array length); no acquiring variant exists, hence no predicate.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer (32-bit narrow oop; decode handled separately)
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
// n.b. FP loads use pipe_class_memory rather than iload_reg_mem.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7396 
7397 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // A general pointer constant may take up to a 4-instruction movz/movk
  // sequence (or a relocated literal), hence the higher cost.
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7453 
// Load Pointer Constant One
//
// Materializes the special pointer constant one (immP_1), used e.g. as a
// marker value; a single mov immediate.
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed: the original debug text said "NULL ptr" (copy-paste from
  // loadConP0); this rule loads the pointer constant one.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7467 
// Load Poll Page Constant

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Polling page is within adr range of the code cache, so a single adr
  // suffices instead of a full mov sequence.
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant
//
// immFPacked restricts matching to floats encodable as an fmov immediate,
// so no constant-table load is needed.
// NOTE(review): cost is INSN_COST * 4 here but plain INSN_COST for the
// analogous loadConD_packed below -- looks inconsistent; confirm intent.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // Assembler fmov-immediate helper takes the value as a double.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}

// Load Float Constant
//
// General float constants are loaded from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}

// Load Packed Double Constant
//
// immDPacked restricts matching to doubles encodable as an fmov immediate.

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(pipe_class_default);
%}
7581 
// Load Double Constant
//
// General double constants (not encodable as an fmov immediate) are loaded
// from the constant table.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed: the original debug text said "float=$con" for this double load.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(pipe_class_default);
%}
7598 
// Store Instructions
//
// Plain (non-releasing) store rules carry the predicate
// !needs_releasing_store(n); releasing stores are matched by the
// *_volatile rules further down.

// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  // Only when the preceding-barrier analysis proved the storestore
  // membar redundant; otherwise storeimmCM0_ordered matches.
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
7643 
7644 
// Store Byte Immediate Zero -- stores the zero register directly.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed: the encoding (aarch64_enc_strb0) stores zr; the original debug
  // text claimed (misspelt) "rscractch2".
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
7657 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short Immediate Zero
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer Immediate Zero
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // NOTE(review): "# int" tag on a 64-bit store -- debug text only.
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // NOTE(review): "# int" tag on a 64-bit store -- debug text only.
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
7740 
// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Compressed Pointer Immediate Zero -- reuses rheapbase as a zero
// source: when both narrow oop and narrow klass bases are NULL, rheapbase
// holds zero, so it can be stored directly.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7859 
//  ---------------- volatile loads and stores ----------------
//
// These rules match when the barrier analysis decides a load needs acquire
// (or a store needs release) semantics; they use the AArch64 ldar/stlr
// family via an indirect (register-only) addressing mode.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char/Short (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7951 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed: format now names the sign-extending form to match the
  // aarch64_enc_ldarsh encoding below (the original said "ldarh",
  // the zero-extending variant used by loadUS2L_volatile).
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7964 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// The 0xFFFFFFFF mask is absorbed by ldarw's implicit zero-extension.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // NOTE(review): "# int" tag on a 64-bit load -- debug text only.
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// ldar has no FP form; the encoding loads via an integer register.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8055 
// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // NOTE(review): "# int" tag on a 64-bit store -- debug text only.
  format %{ "stlr  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// stlr has no FP form; the encoding stores via an integer register.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8163 
8164 //  ---------------- end of volatile loads and stores ----------------
8165 
8166 // ============================================================================
8167 // BSWAP Instructions
8168 
8169 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
8170   match(Set dst (ReverseBytesI src));
8171 
8172   ins_cost(INSN_COST);
8173   format %{ "revw  $dst, $src" %}
8174 
8175   ins_encode %{
8176     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
8177   %}
8178 
8179   ins_pipe(ialu_reg);
8180 %}
8181 
8182 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
8183   match(Set dst (ReverseBytesL src));
8184 
8185   ins_cost(INSN_COST);
8186   format %{ "rev  $dst, $src" %}
8187 
8188   ins_encode %{
8189     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
8190   %}
8191 
8192   ins_pipe(ialu_reg);
8193 %}
8194 
8195 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
8196   match(Set dst (ReverseBytesUS src));
8197 
8198   ins_cost(INSN_COST);
8199   format %{ "rev16w  $dst, $src" %}
8200 
8201   ins_encode %{
8202     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8203   %}
8204 
8205   ins_pipe(ialu_reg);
8206 %}
8207 
8208 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
8209   match(Set dst (ReverseBytesS src));
8210 
8211   ins_cost(INSN_COST);
8212   format %{ "rev16w  $dst, $src\n\t"
8213             "sbfmw $dst, $dst, #0, #15" %}
8214 
8215   ins_encode %{
8216     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8217     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
8218   %}
8219 
8220   ins_pipe(ialu_reg);
8221 %}
8222 
8223 // ============================================================================
8224 // Zero Count Instructions
8225 
8226 instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8227   match(Set dst (CountLeadingZerosI src));
8228 
8229   ins_cost(INSN_COST);
8230   format %{ "clzw  $dst, $src" %}
8231   ins_encode %{
8232     __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
8233   %}
8234 
8235   ins_pipe(ialu_reg);
8236 %}
8237 
8238 instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
8239   match(Set dst (CountLeadingZerosL src));
8240 
8241   ins_cost(INSN_COST);
8242   format %{ "clz   $dst, $src" %}
8243   ins_encode %{
8244     __ clz(as_Register($dst$$reg), as_Register($src$$reg));
8245   %}
8246 
8247   ins_pipe(ialu_reg);
8248 %}
8249 
8250 instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
8251   match(Set dst (CountTrailingZerosI src));
8252 
8253   ins_cost(INSN_COST * 2);
8254   format %{ "rbitw  $dst, $src\n\t"
8255             "clzw   $dst, $dst" %}
8256   ins_encode %{
8257     __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
8258     __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
8259   %}
8260 
8261   ins_pipe(ialu_reg);
8262 %}
8263 
8264 instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
8265   match(Set dst (CountTrailingZerosL src));
8266 
8267   ins_cost(INSN_COST * 2);
8268   format %{ "rbit   $dst, $src\n\t"
8269             "clz    $dst, $dst" %}
8270   ins_encode %{
8271     __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
8272     __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
8273   %}
8274 
8275   ins_pipe(ialu_reg);
8276 %}
8277 
//---------- Population Count Instructions -------------------------------------
//
// AArch64 has no scalar popcount; the value is moved to a SIMD register,
// counted per-byte with cnt, summed with addv, and moved back.

instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): writes $src back to itself to clear the upper 32 bits;
    // this mutates an input register without a USE_KILL effect -- presumably
    // safe because only the low word of an int register is defined, but
    // confirm against the register-allocator's assumptions.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Memory-operand form: load the int straight into the SIMD register.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8367 
8368 // ============================================================================
8369 // MemBar Instruction
8370 
8371 instruct load_fence() %{
8372   match(LoadFence);
8373   ins_cost(VOLATILE_REF_COST);
8374 
8375   format %{ "load_fence" %}
8376 
8377   ins_encode %{
8378     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8379   %}
8380   ins_pipe(pipe_serial);
8381 %}
8382 
8383 instruct unnecessary_membar_acquire() %{
8384   predicate(unnecessary_acquire(n));
8385   match(MemBarAcquire);
8386   ins_cost(0);
8387 
8388   format %{ "membar_acquire (elided)" %}
8389 
8390   ins_encode %{
8391     __ block_comment("membar_acquire (elided)");
8392   %}
8393 
8394   ins_pipe(pipe_class_empty);
8395 %}
8396 
8397 instruct membar_acquire() %{
8398   match(MemBarAcquire);
8399   ins_cost(VOLATILE_REF_COST);
8400 
8401   format %{ "membar_acquire" %}
8402 
8403   ins_encode %{
8404     __ block_comment("membar_acquire");
8405     __ membar(Assembler::LoadLoad|Assembler::LoadStore);
8406   %}
8407 
8408   ins_pipe(pipe_serial);
8409 %}
8410 
8411 
8412 instruct membar_acquire_lock() %{
8413   match(MemBarAcquireLock);
8414   ins_cost(VOLATILE_REF_COST);
8415 
8416   format %{ "membar_acquire_lock (elided)" %}
8417 
8418   ins_encode %{
8419     __ block_comment("membar_acquire_lock (elided)");
8420   %}
8421 
8422   ins_pipe(pipe_serial);
8423 %}
8424 
8425 instruct store_fence() %{
8426   match(StoreFence);
8427   ins_cost(VOLATILE_REF_COST);
8428 
8429   format %{ "store_fence" %}
8430 
8431   ins_encode %{
8432     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8433   %}
8434   ins_pipe(pipe_serial);
8435 %}
8436 
8437 instruct unnecessary_membar_release() %{
8438   predicate(unnecessary_release(n));
8439   match(MemBarRelease);
8440   ins_cost(0);
8441 
8442   format %{ "membar_release (elided)" %}
8443 
8444   ins_encode %{
8445     __ block_comment("membar_release (elided)");
8446   %}
8447   ins_pipe(pipe_serial);
8448 %}
8449 
8450 instruct membar_release() %{
8451   match(MemBarRelease);
8452   ins_cost(VOLATILE_REF_COST);
8453 
8454   format %{ "membar_release" %}
8455 
8456   ins_encode %{
8457     __ block_comment("membar_release");
8458     __ membar(Assembler::LoadStore|Assembler::StoreStore);
8459   %}
8460   ins_pipe(pipe_serial);
8461 %}
8462 
8463 instruct membar_storestore() %{
8464   match(MemBarStoreStore);
8465   ins_cost(VOLATILE_REF_COST);
8466 
8467   format %{ "MEMBAR-store-store" %}
8468 
8469   ins_encode %{
8470     __ membar(Assembler::StoreStore);
8471   %}
8472   ins_pipe(pipe_serial);
8473 %}
8474 
8475 instruct membar_release_lock() %{
8476   match(MemBarReleaseLock);
8477   ins_cost(VOLATILE_REF_COST);
8478 
8479   format %{ "membar_release_lock (elided)" %}
8480 
8481   ins_encode %{
8482     __ block_comment("membar_release_lock (elided)");
8483   %}
8484 
8485   ins_pipe(pipe_serial);
8486 %}
8487 
8488 instruct unnecessary_membar_volatile() %{
8489   predicate(unnecessary_volatile(n));
8490   match(MemBarVolatile);
8491   ins_cost(0);
8492 
8493   format %{ "membar_volatile (elided)" %}
8494 
8495   ins_encode %{
8496     __ block_comment("membar_volatile (elided)");
8497   %}
8498 
8499   ins_pipe(pipe_serial);
8500 %}
8501 
8502 instruct membar_volatile() %{
8503   match(MemBarVolatile);
8504   ins_cost(VOLATILE_REF_COST*100);
8505 
8506   format %{ "membar_volatile" %}
8507 
8508   ins_encode %{
8509     __ block_comment("membar_volatile");
8510     __ membar(Assembler::StoreLoad);
8511   %}
8512 
8513   ins_pipe(pipe_serial);
8514 %}
8515 
8516 // ============================================================================
8517 // Cast/Convert Instructions
8518 
// Reinterpret a long as a pointer.  Just a register-to-register move,
// elided entirely when the allocator assigns dst and src the same
// register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long (the inverse of castX2P); also a
// plain move, elided when both operands share a register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8548 
8549 // Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    // movw (32-bit move) keeps only the low 32 bits of the pointer,
    // which is all the alignment mask needs.
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8561 
8562 // Convert compressed oop into int for vectors alignment masking
8563 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // Only valid when narrow oops are unshifted: the compressed bits can
  // then be used directly as the low pointer bits for masking.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format string: was "mov dst, $src" -- missing the '$' on
  // dst (so the operand was never substituted in PrintOptoAssembly
  // output) and naming the wrong mnemonic; the encoding emits movw.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8577 
8578 
8579 // Convert oop pointer into compressed form
// Compress a possibly-null oop.  The predicate excludes the
// known-not-null case, which is handled by the cheaper rule below.
// encode_heap_oop must cope with a null src, and this variant
// declares KILL cr, so the flags may be clobbered.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null; no null check
// needed.  NOTE(review): cr is declared but carries no KILL effect --
// confirm that encode_heap_oop_not_null really leaves the flags
// untouched (cf. the klass variants' comment further down).
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (and is not a constant);
// the not-null/constant cases take the cheaper rule below.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant), so
// the null check can be skipped.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
8632 
8633 // n.b. AArch64 implementations of encode_klass_not_null and
8634 // decode_klass_not_null do not modify the flags register so, unlike
8635 // Intel, we don't kill CR as a side effect here
8636 
// Compress a klass pointer; per the note above, no flags are
// clobbered so no cr operand/effect is declared.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer.  Uses the two-register form when
// dst and src differ, and the single-register in-place form when the
// allocator assigned them the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
8670 
// CheckCastPP only narrows the ideal type of an existing pointer; no
// machine code is emitted (size(0), empty encoding).
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP is likewise a type-system-only node: zero-size no-op.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII pins an int's type/range; zero-size, zero-cost no-op.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8701 
8702 // ============================================================================
8703 // Atomic operation instructions
8704 //
8705 // Intel and SPARC both implement Ideal Node LoadPLocked and
8706 // Store{PIL}Conditional instructions using a normal load for the
8707 // LoadPLocked and a CAS for the Store{PIL}Conditional.
8708 //
8709 // The ideal code appears only to use LoadPLocked/StorePLocked as a
8710 // pair to lock object allocations from Eden space when not using
8711 // TLABs.
8712 //
8713 // There does not appear to be a Load{IL}Locked Ideal Node and the
8714 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
8715 // and to use StoreIConditional only for 32-bit and StoreLConditional
8716 // only for 64-bit.
8717 //
8718 // We implement LoadPLocked and StorePLocked instructions using,
8719 // respectively the AArch64 hw load-exclusive and store-conditional
8720 // instructions. Whereas we must implement each of
8721 // Store{IL}Conditional using a CAS which employs a pair of
8722 // instructions comprising a load-exclusive followed by a
8723 // store-conditional.
8724 
8725 
8726 // Locked-load (linked load) of the current heap-top
8727 // used when updating the eden heap top
8728 // implemented using ldaxr on AArch64
8729 
// Linked (exclusive) load of a pointer, emitted as ldaxr (acquiring
// load-exclusive) via the aarch64_enc_ldaxr encoding class; pairs
// with storePConditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
8742 
8743 // Conditional-store of the updated heap-top.
8744 // Used during allocation of the shared heap.
8745 // Sets flag (EQ) on success.
8746 // implemented using stlxr on AArch64.
8747 
// Store-conditional of a pointer (stlxr) paired with loadPLocked;
// the encoding class leaves EQ set in the flags on success.
// NOTE(review): oldval is part of the ideal match but is not passed
// to the encoding -- the stlxr succeeds iff the exclusive monitor
// from the paired ldaxr is still held, so oldval is implicit.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
8767 
8768 
8769 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
8770 // when attempting to rebias a lock towards the current thread.  We
8771 // must use the acquire form of cmpxchg in order to guarantee acquire
8772 // semantics in this case.
// 64-bit conditional store, implemented as an acquiring CAS (see the
// rebias rationale in the comment above); flags hold EQ on success.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
8807 
8808 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
8809 // can't match them
8810 
8811 // standard CompareAndSwapX when we are using barriers
8812 // these have higher priority than the rules selected by a predicate
8813 
// 32-bit CAS: cmpxchgw leaves EQ on success, then cset materializes
// the boolean result in $res.  Flags are clobbered (KILL cr).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// 64-bit CAS; same shape as compareAndSwapI but using the 64-bit
// cmpxchg encoding.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Pointer CAS; 64-bit cmpxchg on an oop/raw pointer.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Narrow-oop CAS; compressed oops are 32 bits, hence cmpxchgw.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8885 
8886 // alternative CompareAndSwapX when we are eliding barriers
8887 
// Acquiring 32-bit CAS, selected by needs_acquiring_load_exclusive(n)
// when surrounding barriers can be elided; the lower cost
// (VOLATILE_REF_COST vs 2x) makes these rules win over the plain CAS
// rules above whenever the predicate holds.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring 64-bit CAS; see compareAndSwapIAcq for selection rules.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring pointer CAS; see compareAndSwapIAcq for selection rules.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring narrow-oop CAS; see compareAndSwapIAcq for selection rules.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8963 
8964 
// Atomic exchange (32-bit): stores $newv at [$mem] and returns the
// previous value in $prev.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange (64-bit).
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow oop (32-bit compressed form).
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a full-width pointer.
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9000 
9001 
// Atomic fetch-and-add (64-bit, register increment); $newval receives
// the value previously at [$mem].
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Result-discarding variant: when the fetched value is unused, pass
// noreg so no result register is tied up.  The slightly lower cost
// (9 vs 10) makes this rule preferred when its predicate holds.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit fetch-and-add with an immediate increment (immLAddSub keeps
// the constant within add/sub immediate range).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment, result-discarding variant.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add (register increment), using atomic_addw.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit, result-discarding variant.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add with an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit immediate-increment, result-discarding variant.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9085 
9086 // Manifest a CmpL result in an integer register.
9087 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    // csetw: dst = (src1 != src2) ? 1 : 0; then cnegw negates when
    // src1 < src2, yielding -1 / 0 / 1 as required by CmpL3.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9108 
// Manifest a CmpL3 against an add/sub-range immediate.  Same cset/cneg
// trick as cmpL3_reg_reg; the only change below is documentation and
// fixing the stray one-space indent on the `if (con < 0)` line.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
    // A negative constant is not encodable as a subs immediate, so
    // compare by adding its negation instead; immLAddSub guarantees
    // -con is in range.
    if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    // NE -> 1/0, then negate when LT: yields -1 / 0 / 1.
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9133 
9134 // ============================================================================
9135 // Conditional Move Instructions
9136 
9137 // n.b. we have identical rules for both a signed compare op (cmpOp)
9138 // and an unsigned compare op (cmpOpU). it would be nice if we could
9139 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
9144 // which throws a ShouldNotHappen. So, we have to provide two flavours
9145 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9146 
// Conditional move, int, signed condition.  Note the operand order
// passed to cselw: $src2 is selected when $cmp holds, $src1
// otherwise.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Conditional move, int, unsigned condition (cmpOpU / rFlagsRegU);
// see the note above on why cmpOp and cmpOpU need separate rules.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register

// TODO
// we ought only to be able to cull one of these variants as the ideal
// transforms ought always to order the zero consistently (to left/right?)

// Zero on the left: zr replaces the "condition false" source.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-condition twin of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the right: zr replaces the "condition true" source.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-condition twin of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9251 
9252 // special case for creating a boolean 0 or 1
9253 
9254 // n.b. this is selected in preference to the rule above because it
9255 // avoids loading constants 0 and 1 into a source register
9256 
// Materialize a 0/1 boolean from the flags without any source
// registers: csincw dst, zr, zr, cond gives cond ? 0 : zr+1.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// Unsigned-condition twin of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9294 
// Conditional move, long, signed condition; 64-bit csel, $src2 taken
// when $cmp holds.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-condition twin of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Zero on the right: zr taken when $cmp holds.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-condition twin of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Zero on the left: $src taken when $cmp holds.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-condition twin of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9392 
// Conditional move, pointer, signed condition; same csel shape as the
// long variants.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-condition twin of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// special cases where one arg is zero

// Null on the right: zr taken when $cmp holds.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-condition twin of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Null on the left: $src taken when $cmp holds.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-condition twin of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9490 
// Conditional move of a compressed (narrow) pointer, signed condition.
// Note the src2/src1 swap into cselw: it matches the CMove input order.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9506 
// Conditional move of a compressed (narrow) pointer, unsigned condition
// (cmpOpU/rFlagsRegU variant of cmovN_reg_reg).
// Fix: the format previously said "# signed" although this is the
// unsigned rule; the disassembly comment now matches the operands.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9522 
9523 // special cases where one arg is zero
9524 
// Compressed-pointer CMove where one input is the narrow-null constant
// (immN0); zr encodes the zero operand directly.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-condition form of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Mirror case: zero is the first CMove input, so cselw operand order flips.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// Unsigned-condition form of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9588 
// Conditional move of a float via fcsels; src2/src1 are swapped into the
// instruction to match the CMove input order.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}

// Unsigned-condition form of cmovF_reg.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9624 
// Conditional move of a double via fcseld; src2/src1 are swapped into the
// instruction to match the CMove input order.
// Fix: the format comment previously said "cmove float" although this rule
// matches CMoveD and emits the double-precision fcseld.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9642 
// Unsigned-condition form of cmovD_reg (double conditional move).
// Fix: the format comment previously said "cmove float" although this rule
// matches CMoveD and emits the double-precision fcseld.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(pipe_class_default);
%}
9660 
9661 // ============================================================================
9662 // Arithmetic Instructions
9663 //
9664 
9665 // Integer Addition
9666 
9667 // TODO
9668 // these currently employ operations which do not set CR and hence are
9669 // not flagged as killing CR but we would like to isolate the cases
9670 // where we want to set flags from those where we don't. need to work
9671 // out how to do that.
9672 
// 32-bit integer add, register-register form.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add with an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Same as addI_reg_imm but the register input comes through ConvL2I,
// reading the low 32 bits of a long register directly.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9715 
9716 // Pointer Addition
// Pointer + long offset, plain register form.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + sign-extended int offset, folding the ConvI2L into the
// add's sxtw extend operand.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer + (long << scale), folded into a single lea-style address.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer + ((long)int << scale): ConvI2L and shift both fold into the
// sxtw-scaled addressing mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// (long)int << scale as a single sbfiz (sign-extend + shift in one insn).
// The MIN clamps the extracted field width to 32 bits.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9791 
9792 // Pointer Immediate Addition
9793 // n.b. this needs to be more expensive than using an indirect memory
9794 // operand
// Pointer + add/sub-encodable immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}

// 64-bit long add, register-register form.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9825 
// Long Immediate Addition (no constant pool entries required).
// 64-bit long add with an add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9840 
9841 // Integer Subtraction
// 32-bit integer subtract, register-register form.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
// 32-bit integer subtract with an add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Long Subtraction
// 64-bit long subtract, register-register form.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9888 
// Long Immediate Subtraction (no constant pool entries required).
// 64-bit long subtract with an add/sub-encodable immediate.
// Fix: format string read "sub$dst, ..." (missing separator after the
// mnemonic); now matches the spacing of subL_reg_reg.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9903 
9904 // Integer Negation (special case for sub)
9905 
// 32-bit integer negate: SubI of zero - src matched to negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// 64-bit long negate: SubL of zero - src matched to neg.
// NOTE(review): src is declared iRegIorL2I although this is a 64-bit
// negate; iRegL would be expected here — confirm against the matcher
// rules for the L2I operand class.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9935 
9936 // Integer Multiply
9937 
// 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64 signed multiply: both ConvI2L nodes fold into a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit long multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of a signed 64x64 multiply (smulh).
// NOTE(review): the format string has a stray comma before the "\t# mulhi"
// tail — cosmetic only, affects disassembly text.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10000 
10001 // Combined Integer Multiply & Add/Sub
10002 
// Fused 32-bit multiply-add: src3 + src1*src2 as a single maddw.
// Fix: the format showed "madd" while the encoder emits the 32-bit
// maddw; the disassembly text now matches the emitted instruction.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10018 
// Fused 32-bit multiply-subtract: src3 - src1*src2 as a single msubw.
// Fix: the format showed "msub" while the encoder emits the 32-bit
// msubw; the disassembly text now matches the emitted instruction.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10034 
10035 // Combined Long Multiply & Add/Sub
10036 
// Fused 64-bit multiply-add: src3 + src1*src2 as a single madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// Fused 64-bit multiply-subtract: src3 - src1*src2 as a single msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10068 
10069 // Integer Divide
10070 
// 32-bit signed divide via the shared divw encoding.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (src >> 31) >>> 31 collapses to a single lsrw by 31 (extracts the
// sign bit as 0 or 1) — pattern produced by divide-by-power-of-2 strength
// reduction.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + sign-bit rounding adjustment, folded into one addw with an
// LSR #31 shifted operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}

// Long Divide

// 64-bit signed divide via the shared div encoding.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// 64-bit analogue of signExtract: (src >> 63) >>> 63 as one lsr by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10126 
// 64-bit analogue of div2Round: src + sign-bit rounding adjustment,
// folded into one add with an LSR #63 shifted operand.
// Fix: the format omitted the "LSR" shift notation that the emitted
// instruction uses; now consistent with div2Round's format.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10140 
10141 // Integer Remainder
10142 
// 32-bit signed remainder: sdivw into rscratch1, then
// msubw dst = src1 - rscratch1*src2 (via the shared modw encoding).
// Fix: the format had a stray '(' after "msubw"; disassembly text now
// shows a well-formed two-instruction sequence.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10153 
10154 // Long Remainder
10155 
// 64-bit signed remainder: sdiv into rscratch1, then
// msub dst = src1 - rscratch1*src2 (via the shared mod encoding).
// Fix: stray '(' after "msub" removed, and the line break changed from
// "\n" to "\n\t" to match modI's format layout.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10166 
10167 // Integer Shifts
10168 
10169 // Shift Left Register
// 32-bit shift left, variable (register) shift amount.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// 32-bit shift left by constant; amount masked to 0..31 per JLS.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// 32-bit unsigned shift right, variable shift amount.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// 32-bit unsigned shift right by constant; amount masked to 0..31.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// 32-bit signed (arithmetic) shift right, variable shift amount.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// 32-bit signed shift right by constant; amount masked to 0..31.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10264 
10265 // Combined Int Mask and Right Shift (using UBFM)
10266 // TODO
10267 
10268 // Long Shifts
10269 
10270 // Shift Left Register
// 64-bit shift left, variable (register) shift amount.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
// 64-bit shift left by constant; amount masked to 0..63 per JLS.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// 64-bit unsigned shift right, variable shift amount.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
// 64-bit unsigned shift right by constant; amount masked to 0..63.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Same as urShiftL_reg_imm but the source is a pointer reinterpreted as
// an integer (CastP2X).
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// 64-bit signed (arithmetic) shift right, variable shift amount.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
// 64-bit signed shift right by constant; amount masked to 0..63.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10381 
10382 // BEGIN This section of the file is automatically generated. Do not edit --------------
10383 
// Generated section: XorL src1, -1 (bitwise NOT) as eon with zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// Generated section: XorI src1, -1 (bitwise NOT) as eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10416 
// Generated section: src1 & ~src2 fused into bicw (32-bit).
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Generated section: src1 & ~src2 fused into bic (64-bit).
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10450 
// Generated section: src1 | ~src2 fused into ornw (32-bit).
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Generated section: src1 | ~src2 fused into orn (64-bit).
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10484 
// Generated section: ~(src2 ^ src1) fused into eonw (32-bit).
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Generated section: ~(src2 ^ src1) fused into eon (64-bit).
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10518 
// dst = src1 & ~(src2 >>> src3): both the logical-not (XorI with -1)
// and the shift fold into one BICW (and-not) using its LSR
// shifted-register form; the shift count is masked with 0x1f.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10536 
// dst = src1 & ~(src2 >>> src3): both the logical-not (XorL with -1)
// and the shift fold into one 64-bit BIC (and-not) using its LSR
// shifted-register form; the shift count is masked with 0x3f.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10554 
// dst = src1 & ~(src2 >> src3): not and arithmetic shift both fold
// into one BICW using its ASR shifted-register form; shift count is
// masked with 0x1f.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10572 
// dst = src1 & ~(src2 >> src3): not and arithmetic shift both fold
// into one 64-bit BIC using its ASR shifted-register form; shift
// count is masked with 0x3f.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10590 
// dst = src1 & ~(src2 << src3): not and left shift both fold into
// one BICW using its LSL shifted-register form; shift count is
// masked with 0x1f.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10608 
// dst = src1 & ~(src2 << src3): not and left shift both fold into
// one 64-bit BIC using its LSL shifted-register form; shift count is
// masked with 0x3f.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10626 
// dst = src1 ^ ~(src2 >>> src3), which C2 presents as
// -1 ^ ((src2 >>> src3) ^ src1); one EONW with an LSR
// shifted-register operand covers it.  Shift count masked with 0x1f.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10644 
// dst = src1 ^ ~(src2 >>> src3), which C2 presents as
// -1 ^ ((src2 >>> src3) ^ src1); one 64-bit EON with an LSR
// shifted-register operand covers it.  Shift count masked with 0x3f.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10662 
// dst = src1 ^ ~(src2 >> src3), presented as
// -1 ^ ((src2 >> src3) ^ src1); one EONW with an ASR
// shifted-register operand covers it.  Shift count masked with 0x1f.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10680 
// dst = src1 ^ ~(src2 >> src3), presented as
// -1 ^ ((src2 >> src3) ^ src1); one 64-bit EON with an ASR
// shifted-register operand covers it.  Shift count masked with 0x3f.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10698 
// dst = src1 ^ ~(src2 << src3), presented as
// -1 ^ ((src2 << src3) ^ src1); one EONW with an LSL
// shifted-register operand covers it.  Shift count masked with 0x1f.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10716 
// dst = src1 ^ ~(src2 << src3), presented as
// -1 ^ ((src2 << src3) ^ src1); one 64-bit EON with an LSL
// shifted-register operand covers it.  Shift count masked with 0x3f.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10734 
// dst = src1 | ~(src2 >>> src3): not (XorI with -1) and shift both
// fold into one ORNW (or-not) using its LSR shifted-register form;
// shift count masked with 0x1f.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10752 
// dst = src1 | ~(src2 >>> src3): not (XorL with -1) and shift both
// fold into one 64-bit ORN using its LSR shifted-register form;
// shift count masked with 0x3f.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10770 
// dst = src1 | ~(src2 >> src3): not and arithmetic shift both fold
// into one ORNW using its ASR shifted-register form; shift count
// masked with 0x1f.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10788 
// dst = src1 | ~(src2 >> src3): not and arithmetic shift both fold
// into one 64-bit ORN using its ASR shifted-register form; shift
// count masked with 0x3f.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10806 
// dst = src1 | ~(src2 << src3): not and left shift both fold into
// one ORNW using its LSL shifted-register form; shift count masked
// with 0x1f.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10824 
// dst = src1 | ~(src2 << src3): not and left shift both fold into
// one 64-bit ORN using its LSL shifted-register form; shift count
// masked with 0x3f.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10842 
// dst = src1 & (src2 >>> src3): the shift folds into the AND via
// ANDW's LSR shifted-register form — no separate shift instruction.
// Shift count masked with 0x1f.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10861 
// dst = src1 & (src2 >>> src3): the shift folds into the 64-bit AND
// via its LSR shifted-register form ("andr" is the MacroAssembler
// name for AND).  Shift count masked with 0x3f.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10880 
// dst = src1 & (src2 >> src3): the arithmetic shift folds into the
// AND via ANDW's ASR shifted-register form.  Shift count masked
// with 0x1f.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10899 
// dst = src1 & (src2 >> src3): the arithmetic shift folds into the
// 64-bit AND via its ASR shifted-register form.  Shift count masked
// with 0x3f.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10918 
// dst = src1 & (src2 << src3): the left shift folds into the AND via
// ANDW's LSL shifted-register form.  Shift count masked with 0x1f.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10937 
// dst = src1 & (src2 << src3): the left shift folds into the 64-bit
// AND via its LSL shifted-register form.  Shift count masked with
// 0x3f.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10956 
// dst = src1 ^ (src2 >>> src3): the shift folds into the XOR via
// EORW's LSR shifted-register form.  Shift count masked with 0x1f.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10975 
// dst = src1 ^ (src2 >>> src3): the shift folds into the 64-bit EOR
// via its LSR shifted-register form.  Shift count masked with 0x3f.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10994 
// dst = src1 ^ (src2 >> src3): the arithmetic shift folds into the
// XOR via EORW's ASR shifted-register form.  Shift count masked
// with 0x1f.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11013 
// dst = src1 ^ (src2 >> src3): the arithmetic shift folds into the
// 64-bit EOR via its ASR shifted-register form.  Shift count masked
// with 0x3f.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11032 
// dst = src1 ^ (src2 << src3): the left shift folds into the XOR via
// EORW's LSL shifted-register form.  Shift count masked with 0x1f.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11051 
// dst = src1 ^ (src2 << src3): the left shift folds into the 64-bit
// EOR via its LSL shifted-register form.  Shift count masked with
// 0x3f.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11070 
// dst = src1 | (src2 >>> src3): the shift folds into the OR via
// ORRW's LSR shifted-register form.  Shift count masked with 0x1f.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11089 
// dst = src1 | (src2 >>> src3): the shift folds into the 64-bit ORR
// via its LSR shifted-register form.  Shift count masked with 0x3f.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11108 
// dst = src1 | (src2 >> src3): the arithmetic shift folds into the
// OR via ORRW's ASR shifted-register form.  Shift count masked with
// 0x1f.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11127 
// dst = src1 | (src2 >> src3): the arithmetic shift folds into the
// 64-bit ORR via its ASR shifted-register form.  Shift count masked
// with 0x3f.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11146 
// dst = src1 | (src2 << src3): the left shift folds into the OR via
// ORRW's LSL shifted-register form.  Shift count masked with 0x1f.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11165 
// dst = src1 | (src2 << src3): the left shift folds into the 64-bit
// ORR via its LSL shifted-register form.  Shift count masked with
// 0x3f.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11184 
// dst = src1 + (src2 >>> src3): the shift folds into the ADD via
// ADDW's LSR shifted-register form.  Shift count masked with 0x1f.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11203 
// dst = src1 + (src2 >>> src3): the shift folds into the 64-bit ADD
// via its LSR shifted-register form.  Shift count masked with 0x3f.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11222 
// dst = src1 + (src2 >> src3): the arithmetic shift folds into the
// ADD via ADDW's ASR shifted-register form.  Shift count masked
// with 0x1f.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11241 
// dst = src1 + (src2 >> src3): the arithmetic shift folds into the
// 64-bit ADD via its ASR shifted-register form.  Shift count masked
// with 0x3f.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11260 
// dst = src1 + (src2 << src3): the left shift folds into the ADD via
// ADDW's LSL shifted-register form.  Shift count masked with 0x1f.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11279 
// dst = src1 + (src2 << src3): the left shift folds into the 64-bit
// ADD via its LSL shifted-register form.  Shift count masked with
// 0x3f.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11298 
// dst = src1 - (src2 >>> src3): the shift folds into the SUB via
// SUBW's LSR shifted-register form.  Shift count masked with 0x1f.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11317 
// dst = src1 - (src2 >>> src3): the shift folds into the 64-bit SUB
// via its LSR shifted-register form.  Shift count masked with 0x3f.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11336 
// dst = src1 - (src2 >> src3): the arithmetic shift folds into the
// SUB via SUBW's ASR shifted-register form.  Shift count masked
// with 0x1f.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11355 
// dst = src1 - (src2 >> src3): the arithmetic shift folds into the
// 64-bit SUB via its ASR shifted-register form.  Shift count masked
// with 0x3f.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11374 
// dst = src1 - (src2 << src3): the left shift folds into the SUB via
// SUBW's LSL shifted-register form.  Shift count masked with 0x1f.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11393 
// dst = src1 - (src2 << src3): the left shift folds into the 64-bit
// SUB via its LSL shifted-register form.  Shift count masked with
// 0x3f.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11412 
11413 
11414 
11415 // Shift Left followed by Shift Right.
11416 // This idiom is used by the compiler for the i2b bytecode etc.
// Collapse (src << lshift) >> rshift (signed) into one SBFM:
// immr = (rshift - lshift) & 63 selects where the field lands,
// imms = 63 - lshift is the highest source bit retained.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;            // imms: top source bit kept
    int r = (rshift - lshift) & 63; // immr: net right displacement, mod 64
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11437 
11438 // Shift Left followed by Shift Right.
11439 // This idiom is used by the compiler for the i2b bytecode etc.
11440 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11441 %{
11442   match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
11443   // Make sure we are not going to exceed what sbfmw can do.
11444   predicate((unsigned int)n->in(2)->get_int() <= 31
11445             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11446 
11447   ins_cost(INSN_COST * 2);
11448   format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11449   ins_encode %{
11450     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11451     int s = 31 - lshift;
11452     int r = (rshift - lshift) & 31;
11453     __ sbfmw(as_Register($dst$$reg),
11454             as_Register($src$$reg),
11455             r, s);
11456   %}
11457 
11458   ins_pipe(ialu_reg_shift);
11459 %}
11460 
11461 // Shift Left followed by Shift Right.
11462 // This idiom is used by the compiler for the i2b bytecode etc.
11463 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
11464 %{
11465   match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
11466   // Make sure we are not going to exceed what ubfm can do.
11467   predicate((unsigned int)n->in(2)->get_int() <= 63
11468             && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
11469 
11470   ins_cost(INSN_COST * 2);
11471   format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
11472   ins_encode %{
11473     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11474     int s = 63 - lshift;
11475     int r = (rshift - lshift) & 63;
11476     __ ubfm(as_Register($dst$$reg),
11477             as_Register($src$$reg),
11478             r, s);
11479   %}
11480 
11481   ins_pipe(ialu_reg_shift);
11482 %}
11483 
11484 // Shift Left followed by Shift Right.
11485 // This idiom is used by the compiler for the i2b bytecode etc.
11486 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
11487 %{
11488   match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
11489   // Make sure we are not going to exceed what ubfmw can do.
11490   predicate((unsigned int)n->in(2)->get_int() <= 31
11491             && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
11492 
11493   ins_cost(INSN_COST * 2);
11494   format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
11495   ins_encode %{
11496     int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
11497     int s = 31 - lshift;
11498     int r = (rshift - lshift) & 31;
11499     __ ubfmw(as_Register($dst$$reg),
11500             as_Register($src$$reg),
11501             r, s);
11502   %}
11503 
11504   ins_pipe(ialu_reg_shift);
11505 %}
11506 // Bitfield extract with shift & mask
11507 
// Unsigned bitfield extract: (src >>> rshift) & mask where immI_bitmask
// guarantees mask+1 is a power of two, so exact_log2(mask+1) gives the
// field width.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  // include the shift amount in the debug listing; the previous format
  // omitted $rshift even though it is part of the encoded instruction
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit unsigned bitfield extract: (src >>> rshift) & mask with
// immL_bitmask guaranteeing mask+1 is a power of two.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  // include the shift amount in the debug listing; the previous format
  // omitted $rshift even though it is part of the encoded instruction
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11538 
11539 // We can use ubfx when extending an And with a mask when we know mask
11540 // is positive.  We know that because immI_bitmask guarantees it.
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The zero-extension to long comes for free: ubfx clears all bits above
// the extracted field.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  // include the shift amount in the debug listing; the previous format
  // omitted $rshift even though it is part of the encoded instruction
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11556 
11557 // Rotations
11558 
// Rotate-by-constant composed from Or/Add of complementary shifts.
// The predicate requires lshift + rshift == 0 (mod 64), i.e. the two
// shifts together cover exactly 64 bits, so extr implements the rotate.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant; lshift + rshift must be 0 (mod 32).  Note the w-form
// extrw is emitted even though the format prints "extr".
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same rotate idiom expressed with AddL instead of OrL; the shifted
// halves cannot overlap (the shifts partition the 64 bits), so add and
// or produce identical results.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of extrAddL.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11618 
11619 
11620 // rol expander
11621 
// Rotate-left of a long by a variable amount.  AArch64 has no rolv, so
// this emits ror by the negated count: rorv uses only the low 6 bits of
// the shift register, and (-s) mod 64 == 64 - s.  Uses rscratch1 as a
// temporary for the negated count.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11635 
11636 // rol expander
11637 
// rol expander, 32-bit: rotate-left via rorvw with a negated count
// ((-s) mod 32 == 32 - s; rorvw uses only the low 5 bits).  Uses
// rscratch1 as a temporary.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11651 
// Matches the rotate-left idiom (x << s) | (x >>> (64 - s)) and expands
// to the rolL_rReg expander above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with 0 - shift instead of 64 - shift; equivalent because
// shift counts are taken mod 64 by the hardware.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
11669 
// Matches the 32-bit rotate-left idiom (x << s) | (x >>> (32 - s)).
// Fixed: this int rule previously declared long operand classes
// (iRegLNoSp/iRegL) and expanded the 64-bit rolL_rReg, so it could not
// match an OrI tree correctly; it must use int registers and expand the
// 32-bit rolI_rReg expander.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11678 
// 32-bit rotate-left idiom with 0 - shift (equivalent mod 32).
// Fixed: previously used long operand classes and the 64-bit
// rolL_rReg expander for an int (OrI) rule; now uses int registers and
// the 32-bit rolI_rReg expander.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
11687 
11688 // ror expander
11689 
// Rotate-right of a long by a variable amount; maps directly onto the
// rorv instruction, so no temporary is needed.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit rotate-right via rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
11717 
// Matches the rotate-right idiom (x >>> s) | (x << (64 - s)) and
// expands to the rorL_rReg expander above.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with 0 - shift instead of 64 - shift; equivalent because
// shift counts are taken mod 64 by the hardware.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
11735 
// Matches the 32-bit rotate-right idiom (x >>> s) | (x << (32 - s)).
// Fixed: this int rule previously declared long operand classes
// (iRegLNoSp/iRegL) and expanded the 64-bit rorL_rReg for an OrI tree;
// it must use int registers and the 32-bit rorI_rReg expander.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11744 
// 32-bit rotate-right idiom with 0 - shift (equivalent mod 32).
// Fixed: previously used long operand classes and the 64-bit
// rorL_rReg expander for an int (OrI) rule; now uses int registers and
// the 32-bit rorI_rReg expander.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11753 
11754 // Add/subtract (extended)
11755 
// Long add of a sign-extended int: the ConvI2L is folded into the
// extended-register form of add (sxtw).
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Long subtract of a sign-extended int, using the extended-register
// form of sub (sxtw).
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11781 
11782 
// (src2 << 16) >> 16 is a sign extension from 16 bits, so the whole
// right-hand side folds into add's extended-register sxth form.
// NOTE(review): the int variants below emit the 64-bit x-form add;
// presumably the upper 32 bits of an int register are treated as
// don't-care by consumers — confirm against the port's int-register
// convention.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 24) >> 24: sign extension from 8 bits -> sxtb.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 24) >>> 24: zero extension from 8 bits -> uxtb.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long variants: (src2 << 48) >> 48 sign-extends from 16 bits -> sxth.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 32) >> 32 sign-extends from 32 bits -> sxtw.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 56) >> 56 sign-extends from 8 bits -> sxtb.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// (src2 << 56) >>> 56 zero-extends from 8 bits -> uxtb.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11873 
11874 
// Add/sub where one operand is masked with 0xff / 0xffff / 0xffffffff:
// the AndI/AndL is a zero extension, which folds into the
// extended-register (uxtb/uxth/uxtw) form of add/sub.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// src2 & 0xffff -> zero-extend from 16 bits (uxth).
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add, src2 & 0xff -> uxtb.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add, src2 & 0xffff -> uxth.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Long add, src2 & 0xffffffff -> uxtw.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Subtract counterparts of the patterns above.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12004 
12005 // END This section of the file is automatically generated. Do not edit --------------
12006 
12007 // ============================================================================
12008 // Floating Point Arithmetic Instructions
12009 
// Single-precision FP add.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP add.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision FP subtract.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP subtract.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision FP multiply.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP multiply.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12099 
// We cannot use these fused mul with add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12105 
12106 
12107 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12108 //   match(Set dst (AddF (MulF src1 src2) src3));
12109 
12110 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12111 
12112 //   ins_encode %{
12113 //     __ fmadds(as_FloatRegister($dst$$reg),
12114 //              as_FloatRegister($src1$$reg),
12115 //              as_FloatRegister($src2$$reg),
12116 //              as_FloatRegister($src3$$reg));
12117 //   %}
12118 
12119 //   ins_pipe(pipe_class_default);
12120 // %}
12121 
12122 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12123 //   match(Set dst (AddD (MulD src1 src2) src3));
12124 
12125 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12126 
12127 //   ins_encode %{
12128 //     __ fmaddd(as_FloatRegister($dst$$reg),
12129 //              as_FloatRegister($src1$$reg),
12130 //              as_FloatRegister($src2$$reg),
12131 //              as_FloatRegister($src3$$reg));
12132 //   %}
12133 
12134 //   ins_pipe(pipe_class_default);
12135 // %}
12136 
12137 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12138 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12139 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12140 
12141 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12142 
12143 //   ins_encode %{
12144 //     __ fmsubs(as_FloatRegister($dst$$reg),
12145 //               as_FloatRegister($src1$$reg),
12146 //               as_FloatRegister($src2$$reg),
12147 //              as_FloatRegister($src3$$reg));
12148 //   %}
12149 
12150 //   ins_pipe(pipe_class_default);
12151 // %}
12152 
12153 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12154 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12155 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12156 
12157 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12158 
12159 //   ins_encode %{
12160 //     __ fmsubd(as_FloatRegister($dst$$reg),
12161 //               as_FloatRegister($src1$$reg),
12162 //               as_FloatRegister($src2$$reg),
12163 //               as_FloatRegister($src3$$reg));
12164 //   %}
12165 
12166 //   ins_pipe(pipe_class_default);
12167 // %}
12168 
12169 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12170 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12171 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12172 
12173 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12174 
12175 //   ins_encode %{
12176 //     __ fnmadds(as_FloatRegister($dst$$reg),
12177 //                as_FloatRegister($src1$$reg),
12178 //                as_FloatRegister($src2$$reg),
12179 //                as_FloatRegister($src3$$reg));
12180 //   %}
12181 
12182 //   ins_pipe(pipe_class_default);
12183 // %}
12184 
12185 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12186 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12187 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12188 
12189 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12190 
12191 //   ins_encode %{
12192 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12193 //                as_FloatRegister($src1$$reg),
12194 //                as_FloatRegister($src2$$reg),
12195 //                as_FloatRegister($src3$$reg));
12196 //   %}
12197 
12198 //   ins_pipe(pipe_class_default);
12199 // %}
12200 
12201 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12202 //   match(Set dst (SubF (MulF src1 src2) src3));
12203 
12204 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12205 
12206 //   ins_encode %{
12207 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12208 //                as_FloatRegister($src1$$reg),
12209 //                as_FloatRegister($src2$$reg),
12210 //                as_FloatRegister($src3$$reg));
12211 //   %}
12212 
12213 //   ins_pipe(pipe_class_default);
12214 // %}
12215 
12216 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12217 //   match(Set dst (SubD (MulD src1 src2) src3));
12218 
12219 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12220 
12221 //   ins_encode %{
12222 //   // n.b. insn name should be fnmsubd
12223 //     __ fnmsub(as_FloatRegister($dst$$reg),
12224 //                as_FloatRegister($src1$$reg),
12225 //                as_FloatRegister($src2$$reg),
12226 //                as_FloatRegister($src3$$reg));
12227 //   %}
12228 
12229 //   ins_pipe(pipe_class_default);
12230 // %}
12231 
12232 
// Single-precision FP divide; costed high because fdiv is long-latency.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP divide; even higher latency than the float form.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12262 
// Single-precision FP negate (emits the s-form fnegs; the format
// abbreviates it as "fneg").
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fneg   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP negate.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12290 
// Single-precision FP absolute value.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double-precision FP absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12316 
// Double-precision square root.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float sqrt appears in the ideal graph as D2F(SqrtD(F2D(src))); a
// single fsqrts gives the correctly rounded single-precision result.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12342 
12343 // ============================================================================
12344 // Logical Instructions
12345 
12346 // Integer Logical Instructions
12347 
12348 // And Instructions
12349 
12350 
// Int bitwise AND, register-register form.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12365 
// 32-bit bitwise AND of a register with a logical immediate.
// The encoding emits a plain andw, which does not set flags, so the
// format string must read "andw", not the flag-setting "andsw".
// NOTE(review): the rFlagsReg cr operand appears unused (no effect()
// clause, no flag-setting instruction) — confirm whether it can be
// dropped, matching andI_reg_reg.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12380 
12381 // Or Instructions
12382 
// 32-bit bitwise OR of two registers.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR of a register with a logical immediate.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 32-bit bitwise XOR of two registers.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR of a register with a logical immediate.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12444 
12445 // Long Logical Instructions
12446 // TODO
12447 
// 64-bit logical instructions.  The format strings previously said
// "# int"; they are long (64-bit) operations, so they now say "# long".

// 64-bit bitwise AND of two registers.
// NOTE(review): the rFlagsReg cr operand appears unused (no effect()
// clause and andr does not set flags) — confirm whether it can be
// dropped.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise AND of a register with a logical immediate.
// NOTE(review): cr appears unused here as well.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// 64-bit bitwise OR of two registers.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise OR of a register with a logical immediate.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 64-bit bitwise XOR of two registers.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise XOR of a register with a logical immediate.
// (Clauses reordered — format before ins_cost — to match the other
// logical instructions above; clause order is not significant to adlc.)
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12541 
// Sign-extend int to long: sbfm with immr=0, imms=31 is the canonical
// encoding of sxtw.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: matches (ConvI2L src) masked with
// 0xFFFFFFFF and emits a single ubfm (uxtw) instead of extend+and.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: a 32-bit register move discards the upper bits.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Convert int to boolean: dst = (src != 0) ? 1 : 0.  Clobbers flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Convert pointer to boolean: dst = (src != NULL) ? 1 : 0.  Uses the
// full 64-bit compare.  Clobbers flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
12616 
// Narrow double to float.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Widen float to double.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to int using fcvtzs (convert toward zero, signed).
// NOTE(review): presumably the hardware's saturating/NaN->0 behavior
// matches the Java semantics here — confirm no fixup stub is needed.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float to long using fcvtzs.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Int to float (signed convert, 32-bit source).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Long to float (signed convert, 64-bit source).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double to int using fcvtzs.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double to long using fcvtzs.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Int to double (signed convert, 32-bit source).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Long to double (signed convert, 64-bit source).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
12746 
12747 // stack <-> reg and reg <-> reg shuffles with no conversion
12748 
// Raw bit moves between stack slots and registers; no value conversion,
// only a reinterpretation of the same 32 or 64 bits.

// Load the float stack slot $src into an int register as raw bits.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the int stack slot $src into a float register as raw bits.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load the double stack slot $src into a long register as raw bits.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the long stack slot $src into a double register as raw bits.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store the float register $src to an int stack slot as raw bits.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store the int register $src to a float stack slot as raw bits.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12856 
// Store the double register $src to a long stack slot as raw bits.
// The format string previously printed the operands reversed
// ("strd $dst, $src"); the encoding stores $src into $dst, matching
// the "$src, $dst" order used by the sibling MoveF2I_reg_stack.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12874 
// Store the long register $src to a double stack slot as raw bits.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Register-to-register bit moves via fmov; no memory traffic.
// NOTE(review): these use ins_pipe(pipe_class_memory) although no
// memory is touched — confirm whether a core/fp transfer class is
// more appropriate.

// Move float register bits to an int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Move int register bits to a float register.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}

// Move double register bits to a long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Move long register bits to a double register.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
12964 
12965 // ============================================================================
12966 // clearing of an array
12967 
// Zero-fill an array: count in r11, base address in r10, both
// clobbered (USE_KILL) by the encoding routine.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
12980 
12981 // ============================================================================
12982 // Overflow Math Instructions
12983 
// Overflow checks produce flags (rFlagsReg) consumed by a following
// branch or conditional move testing the V flag.

// int add overflow: cmnw sets flags as for op1 + op2; V => overflow.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// int add overflow, immediate operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long add overflow.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// long add overflow, immediate operand.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// int subtract overflow: cmpw sets flags as for op1 - op2.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// int subtract overflow, immediate operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long subtract overflow.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// long subtract overflow, immediate operand.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// int negate overflow: 0 - op1; overflows only for Integer.MIN_VALUE.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long negate overflow: 0 - op1.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// int multiply overflow.  The 64-bit product is compared with its own
// 32->64 sign extension; a mismatch means the product does not fit in
// 32 bits.  The final movw/cselw/cmpw sequence then synthesizes the V
// flag (0x80000000 - 1 sets V) so downstream consumers can test it.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// int multiply overflow fused with its branch: skips the V-flag
// synthesis and branches directly on the EQ/NE result of the sign
// extension compare (NE = overflow).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// long multiply overflow.  The high 64 bits (smulh) must equal the
// sign extension of the low 64 bits (mul result >> 63, compared via
// ASR); otherwise the 128-bit product does not fit in a long.  The
// same movw/cselw/cmpw trick as above synthesizes the V flag.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// long multiply overflow fused with its branch (NE = overflow).
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13203 
13204 // ============================================================================
13205 // Compare Instructions
13206 
// Signed 32-bit compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed 32-bit compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed 32-bit compare against an add/sub-encodable immediate.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set dst (CmpI op1 op2)) /* see note below */;

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 32-bit compare against an arbitrary immediate; costs more
// because the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed 64-bit compare against zero.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed 64-bit compare against an arbitrary immediate.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Pointer compare (unsigned flags).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-pointer compare (unsigned flags).
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-pointer null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13434 
13435 // FP comparisons
13436 //
13437 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13438 // using normal cmpOp. See declaration of rFlagsReg for details.
13439 
// Single-precision FP compare, register-register; sets normal flags.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13453 
// Single-precision FP compare against literal 0.0, using the immediate
// form of fcmp.  The constant previously used the non-standard 'D'
// suffix (0.0D), which strict C++ compilers reject; a plain 0.0 is
// already a double.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13467 // FROM HERE
13468 
// Double-precision FP compare, register-register; sets normal flags.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13482 
// Double-precision FP compare against literal 0.0, using the immediate
// form of fcmp.  As in compF_reg_zero, the non-standard 0.0D literal
// suffix is replaced with standard 0.0.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13496 
// Three-way float compare (CmpF3): dst = -1 if src1 < src2 or
// unordered, 0 if equal, +1 if greater, via csinv/csneg on the fcmps
// flags.  Clobbers flags.
// NOTE(review): the Label 'done' is bound but never branched to —
// appears to be dead and could likely be removed.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}
13524 
13525 instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
13526 %{
13527   match(Set dst (CmpD3 src1 src2));
13528   effect(KILL cr);
13529 
13530   ins_cost(5 * INSN_COST);
13531   format %{ "fcmpd $src1, $src2\n\t"
13532             "csinvw($dst, zr, zr, eq\n\t"
13533             "csnegw($dst, $dst, $dst, lt)"
13534   %}
13535 
13536   ins_encode %{
13537     Label done;
13538     FloatRegister s1 = as_FloatRegister($src1$$reg);
13539     FloatRegister s2 = as_FloatRegister($src2$$reg);
13540     Register d = as_Register($dst$$reg);
13541     __ fcmpd(s1, s2);
13542     // installs 0 if EQ else -1
13543     __ csinvw(d, zr, zr, Assembler::EQ);
13544     // keeps -1 if less or unordered else installs 1
13545     __ csnegw(d, d, d, Assembler::LT);
13546     __ bind(done);
13547   %}
13548   ins_pipe(pipe_class_default);
13549 
13550 %}
13551 
13552 instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
13553 %{
13554   match(Set dst (CmpF3 src1 zero));
13555   effect(KILL cr);
13556 
13557   ins_cost(5 * INSN_COST);
13558   format %{ "fcmps $src1, 0.0\n\t"
13559             "csinvw($dst, zr, zr, eq\n\t"
13560             "csnegw($dst, $dst, $dst, lt)"
13561   %}
13562 
13563   ins_encode %{
13564     Label done;
13565     FloatRegister s1 = as_FloatRegister($src1$$reg);
13566     Register d = as_Register($dst$$reg);
13567     __ fcmps(s1, 0.0D);
13568     // installs 0 if EQ else -1
13569     __ csinvw(d, zr, zr, Assembler::EQ);
13570     // keeps -1 if less or unordered else installs 1
13571     __ csnegw(d, d, d, Assembler::LT);
13572     __ bind(done);
13573   %}
13574 
13575   ins_pipe(pipe_class_default);
13576 
13577 %}
13578 
13579 instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
13580 %{
13581   match(Set dst (CmpD3 src1 zero));
13582   effect(KILL cr);
13583 
13584   ins_cost(5 * INSN_COST);
13585   format %{ "fcmpd $src1, 0.0\n\t"
13586             "csinvw($dst, zr, zr, eq\n\t"
13587             "csnegw($dst, $dst, $dst, lt)"
13588   %}
13589 
13590   ins_encode %{
13591     Label done;
13592     FloatRegister s1 = as_FloatRegister($src1$$reg);
13593     Register d = as_Register($dst$$reg);
13594     __ fcmpd(s1, 0.0D);
13595     // installs 0 if EQ else -1
13596     __ csinvw(d, zr, zr, Assembler::EQ);
13597     // keeps -1 if less or unordered else installs 1
13598     __ csnegw(d, d, d, Assembler::LT);
13599     __ bind(done);
13600   %}
13601   ins_pipe(pipe_class_default);
13602 
13603 %}
13604 
// CmpLTMask: dst = (p < q, signed) ? -1 : 0, i.e. an all-ones mask on
// less-than. Built as cset (0/1) followed by negation.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = 1 if LT else 0
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // dst = 0 - dst, turning 1 into -1 (all ones)
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13625 
// CmpLTMask against zero: dst = (src < 0) ? -1 : 0. A single arithmetic
// shift right by 31 replicates the sign bit, so no flags are needed
// (cr is still listed KILL to match the generic rule).
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13641 
13642 // ============================================================================
13643 // Max and Min
13644 
// Signed integer minimum: dst = (src1 < src2) ? src1 : src2,
// via compare + conditional select (branch-free).
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // select src1 when LT (signed), else src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13669 // FROM HERE
13670 
// Signed integer maximum: dst = (src1 > src2) ? src1 : src2,
// via compare + conditional select (branch-free); mirror of minI_rReg.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // select src1 when GT (signed), else src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13695 
13696 // ============================================================================
13697 // Branch Instructions
13698 
13699 // Direct Branch.
// Unconditional direct branch (Goto) to a label.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
13713 
13714 // Conditional Near Branch
// Conditional branch on signed condition codes in the normal flags reg.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13734 
13735 // Conditional Near Branch Unsigned
// Conditional branch on unsigned condition codes (rFlagsRegU).
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13755 
13756 // Make use of CBZ and CBNZ.  These instructions, as well as being
13757 // shorter than (cmp; branch), have the additional benefit of not
13758 // killing the flags.
13759 
// Compare-int-with-zero-and-branch fused into cbzw/cbnzw. Only matches
// eq/ne tests (see predicate); unlike cmp+branch this does not touch flags.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ -> branch if zero, otherwise (NE) branch if non-zero
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13778 
// Compare-long-with-zero-and-branch fused into cbz/cbnz (64-bit form).
// Only matches eq/ne tests; does not touch flags.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13797 
// Pointer null-check-and-branch fused into cbz/cbnz (64-bit form).
// Only matches eq/ne tests; does not touch flags.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13816 
// Null check on a compressed oop without decoding it: the decoded pointer
// is null exactly when the narrow oop is zero, so test the 32-bit value
// with cbzw/cbnzw and skip the DecodeN. Only matches eq/ne tests.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13835 
13836 // Conditional Far Branch
13837 // Conditional Far Branch Unsigned
13838 // TODO: fixme
13839 
13840 // counted loop end branch near
// Back-branch at the end of a counted loop, signed condition.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13856 
13857 // counted loop end branch near Unsigned
// Back-branch at the end of a counted loop, unsigned condition.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13873 
13874 // counted loop end branch far
13875 // counted loop end branch far unsigned
13876 // TODO: fixme
13877 
13878 // ============================================================================
13879 // inlined locking and unlocking
13880 
// Inlined monitor-enter fast path; sets flags to indicate success/failure
// and clobbers two temp registers. Slow path is taken by the caller on
// failure.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13895 
// Inlined monitor-exit fast path; mirror of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13908 
13909 
13910 // ============================================================================
13911 // Safepoint Instructions
13912 
13913 // TODO
13914 // provide a near and far version of this code
13915 
// Safepoint poll: load from the polling page; the VM unmaps the page to
// trap threads at safepoints.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
13928 
13929 
13930 // ============================================================================
13931 // Procedure Call/Return Instructions
13932 
13933 // Call Java Static Instruction
13934 
// Direct call to a statically-bound Java method.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13950 
13951 // TO HERE
13952 
13953 // Call Java Dynamic Instruction
// Dynamically-dispatched Java call (inline-cache based).
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13969 
13970 // Call Runtime Instruction
13971 
// Call out of compiled Java code into the VM runtime.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13986 
13987 // Call Runtime Instruction
13988 
// Leaf runtime call (no Java frame state needed at the callee).
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14003 
14004 // Call Runtime Instruction
14005 
// Leaf runtime call that does not use floating point.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14020 
14021 // Tail Call; Jump from runtime stub to Java code.
14022 // Also known as an 'interprocedural jump'.
14023 // Target of jump will eventually return to caller.
14024 // TailJump below removes the return address.
// Indirect tail call: jump (not call) through jump_target with the
// method oop held in the inline-cache register.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14037 
// Indirect tail jump carrying an exception oop in r0 (e.g. to the
// exception handler); removes the return address (see TailCall note above
// in the original file).
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14050 
14051 // Create exception oop: created by stack-crawling runtime code.
14052 // Created exception is now available to this handler, and is setup
14053 // just prior to jumping to this handler. No code emitted.
14054 // TODO check
14055 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Bind the incoming exception oop (already placed in r0 by the runtime)
// to a node; emits no instructions.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14068 
14069 // Rethrow exception: The exception oop will come in the first
14070 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: exception oop arrives in the first argument register; jump
// (not call) to the shared rethrow stub.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14081 
14082 
14083 // Return Instruction
14084 // epilog node loads ret address into lr as part of frame pop
// Method return; lr has already been restored by the epilog frame pop.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14095 
14096 // Die now.
// Halt: trap immediately via a breakpoint instruction; reaching this
// code indicates a compiler/VM invariant violation.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14111 
14112 // ============================================================================
14113 // Partial Subtype Check
14114 //
14115 // superklass array for an instance of the superklass.  Set a hidden
14116 // internal cache on a hit (cache is checked with exposed code in
14117 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14118 // encoding ALSO sets flags.
14119 
// Partial subtype check (slow path of instanceof/checkcast). Result is
// zero on a hit, non-zero on a miss; flags are also set by the encoding.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14134 
// Variant used when only the flags from comparing the check result with
// zero are needed; the result register itself is clobbered.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14149 
// Intrinsic for String.compareTo on non-compact strings; delegates to the
// MacroAssembler string_compare stub helper.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14165 
// Intrinsic for String.indexOf with a variable-length pattern
// (icnt2 == -1 tells the stub the pattern length is in a register).
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14184 
// Intrinsic for String.indexOf with a small constant pattern length
// (immI_le_4): the length is passed as an immediate so cnt2 is free
// (zr is handed to the stub in its place).
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14205 
// Intrinsic for String.equals on non-compact strings.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14221 
// Intrinsic for Arrays.equals on char arrays (UU encoding only, per the
// predicate).
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14236 
14237 // encode char[] to byte[] in ISO_8859_1
14238 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
14239                           vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
14240                           vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
14241                           iRegI_R0 result, rFlagsReg cr)
14242 %{
14243   match(Set result (EncodeISOArray src (Binary dst len)));
14244   effect(USE_KILL src, USE_KILL dst, USE_KILL len,
14245          KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);
14246 
14247   format %{ "Encode array $src,$dst,$len -> $result" %}
14248   ins_encode %{
14249     __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
14250          $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
14251          $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
14252   %}
14253   ins_pipe( pipe_class_memory );
14254 %}
14255 
14256 // ============================================================================
14257 // This name is KNOWN by the ADLC and cannot be changed.
14258 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14259 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated thread
// register, so this emits no code.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14274 
14275 // ====================VECTOR INSTRUCTIONS=====================================
14276 
14277 // Load vector (32 bits)
// Load vector (32 bits)
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(pipe_class_memory);
%}
14287 
14288 // Load vector (64 bits)
// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(pipe_class_memory);
%}
14298 
14299 // Load Vector (128 bits)
// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(pipe_class_memory);
%}
14309 
14310 // Store Vector (32 bits)
// Store Vector (32 bits)
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(pipe_class_memory);
%}
14320 
14321 // Store Vector (64 bits)
// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(pipe_class_memory);
%}
14331 
14332 // Store Vector (128 bits)
// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(pipe_class_memory);
%}
14342 
// Replicate a byte from a GP register into all 8 lanes of a 64-bit vector
// (also handles 4-lane vectors, per the predicate).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14355 
// Replicate a byte into all 16 lanes of a 128-bit vector.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14367 
// Replicate an immediate byte into all lanes of a 64-bit vector;
// the constant is masked to 8 bits for movi.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}
14380 
// Replicate an immediate byte into all 16 lanes of a 128-bit vector.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(pipe_class_default);
%}
14392 
// Replicate a 16-bit value into 4 (or 2) halfword lanes of a 64-bit vector.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14405 
// Replicate a 16-bit value into all 8 halfword lanes of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14417 
// Replicate an immediate halfword into a 64-bit vector; constant masked
// to 16 bits.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}
14430 
// Replicate an immediate halfword into all 8 lanes of a 128-bit vector.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}
14442 
// Replicate a 32-bit value into both word lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14454 
// Replicate a 32-bit value into all 4 word lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14466 
// Replicate an immediate 32-bit value into both lanes of a 64-bit vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(pipe_class_default);
%}
14478 
14479 instruct replicate4I_imm(vecX dst, immI con)
14480 %{
14481   predicate(n->as_Vector()->length() == 4);
14482   match(Set dst (ReplicateI con));
14483   ins_cost(INSN_COST);
14484   format %{ "movi  $dst, $con\t# vector(4I)" %}
14485   ins_encode %{
14486     __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
14487   %}
14488   ins_pipe(pipe_class_default);
14489 %}
14490 
14491 instruct replicate2L(vecX dst, iRegL src)
14492 %{
14493   predicate(n->as_Vector()->length() == 2);
14494   match(Set dst (ReplicateL src));
14495   ins_cost(INSN_COST);
14496   format %{ "dup  $dst, $src\t# vector (2L)" %}
14497   ins_encode %{
14498     __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
14499   %}
14500   ins_pipe(pipe_class_default);
14501 %}
14502 
14503 instruct replicate2L_zero(vecX dst, immI0 zero)
14504 %{
14505   predicate(n->as_Vector()->length() == 2);
14506   match(Set dst (ReplicateI zero));
14507   ins_cost(INSN_COST);
14508   format %{ "movi  $dst, $zero\t# vector(4I)" %}
14509   ins_encode %{
14510     __ eor(as_FloatRegister($dst$$reg), __ T16B,
14511            as_FloatRegister($dst$$reg),
14512            as_FloatRegister($dst$$reg));
14513   %}
14514   ins_pipe(pipe_class_default);
14515 %}
14516 
14517 instruct replicate2F(vecD dst, vRegF src)
14518 %{
14519   predicate(n->as_Vector()->length() == 2);
14520   match(Set dst (ReplicateF src));
14521   ins_cost(INSN_COST);
14522   format %{ "dup  $dst, $src\t# vector (2F)" %}
14523   ins_encode %{
14524     __ dup(as_FloatRegister($dst$$reg), __ T2S,
14525            as_FloatRegister($src$$reg));
14526   %}
14527   ins_pipe(pipe_class_default);
14528 %}
14529 
14530 instruct replicate4F(vecX dst, vRegF src)
14531 %{
14532   predicate(n->as_Vector()->length() == 4);
14533   match(Set dst (ReplicateF src));
14534   ins_cost(INSN_COST);
14535   format %{ "dup  $dst, $src\t# vector (4F)" %}
14536   ins_encode %{
14537     __ dup(as_FloatRegister($dst$$reg), __ T4S,
14538            as_FloatRegister($src$$reg));
14539   %}
14540   ins_pipe(pipe_class_default);
14541 %}
14542 
14543 instruct replicate2D(vecX dst, vRegD src)
14544 %{
14545   predicate(n->as_Vector()->length() == 2);
14546   match(Set dst (ReplicateD src));
14547   ins_cost(INSN_COST);
14548   format %{ "dup  $dst, $src\t# vector (2D)" %}
14549   ins_encode %{
14550     __ dup(as_FloatRegister($dst$$reg), __ T2D,
14551            as_FloatRegister($src$$reg));
14552   %}
14553   ins_pipe(pipe_class_default);
14554 %}
14555 
14556 // ====================REDUCTION ARITHMETIC====================================
14557 
// Add-reduce a 2-element int vector into a scalar:
// dst = src1 + src2[0] + src2[1].
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    // Move both 32-bit lanes out to GP registers, then add them to src1.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4-element int vector into a scalar:
// dst = src1 + src2[0] + src2[1] + src2[2] + src2[3].
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    // ADDV sums all four lanes into lane 0; move it out and add src1.
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14594 
// Multiply-reduce a 2-element int vector into a scalar:
// dst = src1 * src2[0] * src2[1].
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: removed stray trailing "\n\t" after the last format line,
  // which left a dangling continuation in the debug listing.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    // Extract lane 0, multiply by the scalar input, then fold in lane 1.
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14613 
// Multiply-reduce a 4-element int vector into a scalar:
// dst = src1 * src2[0] * src2[1] * src2[2] * src2[3].
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Fixed: removed stray trailing "\n\t" after the last format line,
  // which left a dangling continuation in the debug listing.
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    // Copy the high 64 bits over the low 64 bits, multiply pairwise so
    // tmp holds {src2[0]*src2[2], src2[1]*src2[3]}, then fold the two
    // partial products and src1 together in GP registers.
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
14638 
// Add-reduce a 2-element float vector into a scalar:
// dst = src1 + src2[0] + src2[1] (strict left-to-right FP order).
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    // Scalar fadds uses lane 0; INS moves lane 1 down for the next add.
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add-reduce a 4-element float vector into a scalar, accumulating each
// lane in order to preserve FP addition ordering.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    // Lanes 1..3 are each moved to lane 0 of tmp before being added.
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14690 
// Multiply-reduce a 2-element float vector into a scalar:
// dst = src1 * src2[0] * src2[1] (strict left-to-right FP order).
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the trailing comment said "add reduction4f" (copy-paste from
  // reduce_add4F); this is a 2-element multiply reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    // Scalar fmuls uses lane 0; INS moves lane 1 down for the next mul.
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14710 
// Multiply-reduce a 4-element float vector into a scalar, multiplying
// each lane in order to preserve FP ordering.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the trailing comment said "add reduction4f" (copy-paste from
  // reduce_add4F); this is a multiply reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    // Lanes 1..3 are each moved to lane 0 of tmp before being multiplied.
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14742 
// Add-reduce a 2-element double vector into a scalar:
// dst = src1 + src2[0] + src2[1] (strict left-to-right FP order).
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    // Scalar faddd uses lane 0; INS moves lane 1 down for the next add.
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14762 
// Multiply-reduce a 2-element double vector into a scalar:
// dst = src1 * src2[0] * src2[1] (strict left-to-right FP order).
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fixed: the trailing comment said "add reduction2d" (copy-paste from
  // reduce_add2D); this is a multiply reduction.
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    // Scalar fmuld uses lane 0; INS moves lane 1 down for the next mul.
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14782 
14783 // ====================VECTOR ARITHMETIC=======================================
14784 
14785 // --------------------------------- ADD --------------------------------------
14786 
// Element-wise vector add; ADDV for integer lanes, FADD for FP lanes.

// Add 8 (or 4) byte lanes in D registers.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  // Length-4 byte vectors reuse the 8B form; extra lanes are ignored.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add 16 byte lanes in Q registers.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add 4 (or 2) short lanes in D registers.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add 8 short lanes in Q registers.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add 2 int lanes in D registers.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add 4 int lanes in Q registers.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add 2 long lanes in Q registers.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add 2 float lanes in D registers.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Add 4 float lanes in Q registers.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14914 
// Add 2 double lanes in Q registers.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Fixed: the length predicate was missing; every other 2D vector
  // instruct in this file (vsub2D, vmul2D, vdiv2D, ...) guards on a
  // vector length of 2.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
14927 
14928 // --------------------------------- SUB --------------------------------------
14929 
// Element-wise vector subtract; SUBV for integer lanes, FSUB for FP lanes.

// Subtract 8 (or 4) byte lanes in D registers.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  // Length-4 byte vectors reuse the 8B form; extra lanes are ignored.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 16 byte lanes in Q registers.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 4 (or 2) short lanes in D registers.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 8 short lanes in Q registers.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 2 int lanes in D registers.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 4 int lanes in Q registers.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 2 long lanes in Q registers.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 2 float lanes in D registers.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 4 float lanes in Q registers.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Subtract 2 double lanes in Q registers.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15071 
15072 // --------------------------------- MUL --------------------------------------
15073 
// Element-wise vector multiply; MULV for integer lanes, FMUL for FP
// lanes.  Note there is no 2L form: AArch64 NEON has no 64x64 MUL.

// Multiply 4 (or 2) short lanes in D registers.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply 8 short lanes in Q registers.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply 2 int lanes in D registers.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply 4 int lanes in Q registers.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply 2 float lanes in D registers.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply 4 float lanes in Q registers.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply 2 double lanes in Q registers.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15172 
15173 // --------------------------------- DIV --------------------------------------
15174 
// Element-wise FP vector divide (FDIV); division exists only for FP lanes.

// Divide 2 float lanes in D registers.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Divide 4 float lanes in Q registers.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Divide 2 double lanes in Q registers.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15216 
15217 // --------------------------------- SQRT -------------------------------------
15218 
// Element-wise square root of 2 double lanes in a Q register.
// NOTE(review): unlike the neighbouring FP unary instructs (vabs*,
// vneg*) this declares no ins_cost, so the ADL default cost applies —
// confirm this is intended.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15230 
15231 // --------------------------------- ABS --------------------------------------
15232 
// Element-wise FP absolute value (FABS).

// Absolute value of 2 float lanes in D registers.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Absolute value of 4 float lanes in Q registers.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Absolute value of 2 double lanes in Q registers.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15271 
15272 // --------------------------------- NEG --------------------------------------
15273 
// Element-wise FP negation (FNEG).

// Negate 2 float lanes in D registers.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Negate 4 float lanes in Q registers.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// Negate 2 double lanes in Q registers.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15312 
15313 // --------------------------------- AND --------------------------------------
15314 
// Bitwise AND of whole vectors.  Logical ops are lane-width agnostic,
// so the predicates use length_in_bytes rather than element count.

// AND two 64-bit (or 32-bit) vectors in D registers.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}

// AND two 128-bit vectors in Q registers.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15343 
15344 // --------------------------------- OR ---------------------------------------
15345 
// Bitwise OR of vectors with a 4- or 8-byte payload held in a D register.
// Lane structure is irrelevant for a bitwise op, so T8B covers all element
// types of these sizes.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // The emitted instruction is ORR (see ins_encode and vor16B); the format
  // mnemonic must say "orr", not "and".
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15360 
// Bitwise OR of 128-bit vectors (Q register, T16B).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15374 
15375 // --------------------------------- XOR --------------------------------------
15376 
// Bitwise XOR of vectors with a 4- or 8-byte payload held in a D register.
// The format prints the ideal-op name "xor"; the AArch64 instruction emitted
// is EOR.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15391 
// Bitwise XOR of 128-bit vectors (Q register, T16B); emits EOR.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15405 
15406 // ------------------------------ Shift ---------------------------------------
15407 
// Materialize a variable left-shift count: DUP replicates the scalar count
// register into every byte lane of a Q register, for use by the SSHL/USHL
// variable-shift rules below.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15416 
15417 // Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Materialize a variable right-shift count: replicate the scalar count into
// every byte lane, then negate it, because SSHL/USHL shift right when a
// lane's count is negative.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15427 
// Variable shift of byte lanes (4B/8B), left or arithmetic right.  One SSHL
// handles both ideal ops: for RShiftVB the count vector was already negated
// by vshiftcntR, and SSHL shifts right for negative lane counts.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15442 
// Variable shift of 16 byte lanes, left or arithmetic right (count vector
// pre-negated for right shifts; see vshiftcntR).
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15456 
// Variable logical (zero-fill) right shift of byte lanes: USHL with the
// pre-negated count vector from vshiftcntR.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15470 
// Variable logical right shift of 16 byte lanes (USHL, pre-negated count).
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15483 
// Immediate left shift of byte lanes.  The count is masked to 5 bits; a
// shift >= the 8-bit element width clears every lane, which is done with
// EOR dst,src,src (dst = 0) since SHL cannot encode a shift of 8.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15503 
// Immediate left shift of 16 byte lanes; shift >= 8 zeroes the destination
// via EOR dst,src,src.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15522 
// Immediate arithmetic right shift of byte lanes.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    // Arithmetic shifts >= the element width saturate to a full sign fill.
    if (sh >= 8) sh = 7;
    // Negate-and-mask to the form this assembler's sshr expects.
    // NOTE(review): for sh == 0 this passes 0, which presumably encodes as a
    // shift by the full element width rather than by zero -- confirm C2
    // never emits a right shift by zero, or handle it explicitly.
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
15538 
// Immediate arithmetic right shift of 16 byte lanes; shifts >= 8 are
// clamped to 7 (sign fill), then negated-and-masked for this assembler's
// sshr (see vsra8B_imm for the sh == 0 caveat).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
15553 
// Immediate logical right shift of byte lanes; shift >= 8 zeroes the
// destination via EOR dst,src,src.  The count is passed negated-and-masked
// (-sh & 7), the form this assembler's ushr expects.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15573 
// Immediate logical right shift of 16 byte lanes; shift >= 8 zeroes the
// destination, otherwise USHR with the negated-and-masked count.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15592 
// Variable shift of Java short lanes ("S" in the rule name is the Java
// type; the hardware arrangement is 4H, 16-bit halfwords).  One SSHL
// covers left and arithmetic right (count pre-negated by vshiftcntR).
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15607 
// Variable shift of 8 short lanes (T8H), left or arithmetic right.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15621 
// Variable logical right shift of short lanes (T4H; USHL with the
// pre-negated count vector).
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15635 
// Variable logical right shift of 8 short lanes (T8H).
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15648 
// Immediate left shift of short lanes; a shift >= the 16-bit element width
// clears every lane via EOR dst,src,src (arrangement T8B is fine there --
// the zeroing is bitwise).
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15668 
// Immediate left shift of 8 short lanes; shift >= 16 zeroes the destination.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15687 
// Immediate arithmetic right shift of short lanes; shifts >= 16 clamp to 15
// (sign fill), then the count is negated-and-masked for this assembler's
// sshr (see vsra8B_imm for the sh == 0 caveat).
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
15703 
// Immediate arithmetic right shift of 8 short lanes (clamp to 15, then
// negate-and-mask; see vsra4S_imm).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(pipe_class_default);
%}
15718 
// Immediate logical right shift of short lanes; shift >= 16 zeroes the
// destination, otherwise USHR with the negated-and-masked count.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15738 
// Immediate logical right shift of 8 short lanes; shift >= 16 zeroes the
// destination.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(pipe_class_default);
%}
15757 
// Variable shift of 2 int lanes (T2S, 32-bit words), left or arithmetic
// right (count pre-negated for right shifts; see vshiftcntR).
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15771 
// Variable shift of 4 int lanes (T4S), left or arithmetic right.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15785 
// Variable logical right shift of 2 int lanes (T2S, USHL with pre-negated
// count).
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15798 
// Variable logical right shift of 4 int lanes (T4S).
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15811 
// Immediate left shift of 2 int lanes.  The 5-bit mask equals the 32-bit
// element width - 1, so no zeroing branch is needed (unlike the byte/short
// rules above).
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15824 
// Immediate left shift of 4 int lanes (count masked to 0..31).
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15837 
// Immediate arithmetic right shift of 2 int lanes; the count is passed
// negated-and-masked (-c & 31), the form this assembler's sshr expects
// (see vsra8B_imm for the zero-shift caveat).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15850 
// Immediate arithmetic right shift of 4 int lanes (negated-and-masked count;
// see vsra2I_imm).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15863 
// Immediate logical right shift of 2 int lanes (negated-and-masked count).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15876 
// Immediate logical right shift of 4 int lanes (negated-and-masked count).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(pipe_class_default);
%}
15889 
// Variable shift of 2 long lanes (T2D), left or arithmetic right (count
// pre-negated for right shifts; see vshiftcntR).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15903 
// Variable logical right shift of 2 long lanes (T2D).
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15916 
// Immediate left shift of 2 long lanes; the 6-bit mask matches the 64-bit
// element width, so no zeroing branch is needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15929 
// Immediate arithmetic right shift of 2 long lanes (negated-and-masked
// count, the form this assembler's sshr expects; see vsra8B_imm).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15942 
// Immediate logical right shift of 2 long lanes (negated-and-masked count).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(pipe_class_default);
%}
15955 
15956 //----------PEEPHOLE RULES-----------------------------------------------------
15957 // These must follow all instruction definitions as they use the names
15958 // defined in the instructions definitions.
15959 //
15960 // peepmatch ( root_instr_name [preceding_instruction]* );
15961 //
15962 // peepconstraint %{
15963 // (instruction_number.operand_name relational_op instruction_number.operand_name
15964 //  [, ...] );
15965 // // instruction numbers are zero-based using left to right order in peepmatch
15966 //
15967 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
15968 // // provide an instruction_number.operand_name for each operand that appears
15969 // // in the replacement instruction's match rule
15970 //
15971 // ---------VM FLAGS---------------------------------------------------------
15972 //
15973 // All peephole optimizations can be turned off using -XX:-OptoPeephole
15974 //
15975 // Each peephole rule is given an identifying number starting with zero and
15976 // increasing by one in the order seen by the parser.  An individual peephole
15977 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
15978 // on the command-line.
15979 //
15980 // ---------CURRENT LIMITATIONS----------------------------------------------
15981 //
15982 // Only match adjacent instructions in same basic block
15983 // Only equality constraints
15984 // Only constraints between operands, not (0.dest_reg == RAX_enc)
15985 // Only one replacement instruction
15986 //
15987 // ---------EXAMPLE----------------------------------------------------------
15988 //
15989 // // pertinent parts of existing instructions in architecture description
15990 // instruct movI(iRegINoSp dst, iRegI src)
15991 // %{
15992 //   match(Set dst (CopyI src));
15993 // %}
15994 //
15995 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
15996 // %{
15997 //   match(Set dst (AddI dst src));
15998 //   effect(KILL cr);
15999 // %}
16000 //
16001 // // Change (inc mov) to lea
16002 // peephole %{
//   // increment preceded by register-register move
16004 //   peepmatch ( incI_iReg movI );
16005 //   // require that the destination register of the increment
16006 //   // match the destination register of the move
16007 //   peepconstraint ( 0.dst == 1.dst );
16008 //   // construct a replacement instruction that sets
16009 //   // the destination to ( move's source register + one )
16010 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16011 // %}
16012 //
16013 
16014 // Implementation no longer uses movX instructions since
16015 // machine-independent system no longer uses CopyX nodes.
16016 //
16017 // peephole
16018 // %{
16019 //   peepmatch (incI_iReg movI);
16020 //   peepconstraint (0.dst == 1.dst);
16021 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16022 // %}
16023 
16024 // peephole
16025 // %{
16026 //   peepmatch (decI_iReg movI);
16027 //   peepconstraint (0.dst == 1.dst);
16028 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16029 // %}
16030 
16031 // peephole
16032 // %{
16033 //   peepmatch (addI_iReg_imm movI);
16034 //   peepconstraint (0.dst == 1.dst);
16035 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16036 // %}
16037 
16038 // peephole
16039 // %{
16040 //   peepmatch (incL_iReg movL);
16041 //   peepconstraint (0.dst == 1.dst);
16042 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16043 // %}
16044 
16045 // peephole
16046 // %{
16047 //   peepmatch (decL_iReg movL);
16048 //   peepconstraint (0.dst == 1.dst);
16049 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16050 // %}
16051 
16052 // peephole
16053 // %{
16054 //   peepmatch (addL_iReg_imm movL);
16055 //   peepconstraint (0.dst == 1.dst);
16056 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16057 // %}
16058 
16059 // peephole
16060 // %{
16061 //   peepmatch (addP_iReg_imm movP);
16062 //   peepconstraint (0.dst == 1.dst);
16063 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16064 // %}
16065 
16066 // // Change load of spilled value to only a spill
16067 // instruct storeI(memory mem, iRegI src)
16068 // %{
16069 //   match(Set mem (StoreI mem src));
16070 // %}
16071 //
16072 // instruct loadI(iRegINoSp dst, memory mem)
16073 // %{
16074 //   match(Set dst (LoadI mem));
16075 // %}
16076 //
16077 
16078 //----------SMARTSPILL RULES---------------------------------------------------
16079 // These must follow all instruction definitions as they use the names
16080 // defined in the instructions definitions.
16081 
16082 // Local Variables:
16083 // mode: c++
16084 // End: