1 //
   2 // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
   31 // architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
   71 //   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
  74 // as regards Java usage. we don't use any callee save registers
  75 // because this makes it difficult to de-optimise a frame (see comment
  76 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
  81 reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
  82 reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
  83 reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
  84 reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
  85 reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
  86 reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
  87 reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
  88 reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
  89 reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
  90 reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
  91 reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
  92 reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
  93 reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
  94 reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
  95 reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
  96 reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
  97 reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
  98 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
  99 reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
 100 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
 101 reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
 102 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
 103 reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
 104 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
 105 reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
 106 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
 107 reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
 108 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
 109 reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
 110 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
 111 reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
 112 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
 113 reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
 114 reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
 115 reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
 116 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
 117 reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
 118 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
 119 reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
 120 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
 121 reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
 122 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
 123 reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
 124 reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
 125 reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
 126 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
 127 reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
 128 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
 129 reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
 130 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
 131 reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
 132 reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
 133 reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
 134 reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
 135 reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
 136 reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
 137 reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
 138 reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
 139 reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
 140 reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
  160 // for Java use, float registers v0-v15 are always saved on call
  161 // (whereas the platform ABI treats v8-v15 as callee save). float
  162 // registers v16-v31 are SOC as per the platform spec
 163 
 164   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 165   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 166   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 167   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 168 
 169   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 170   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 171   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 172   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 173 
 174   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 175   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 176   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 177   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 178 
 179   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 180   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 181   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 182   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 183 
 184   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 185   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 186   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 187   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 188 
 189   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 190   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 191   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 192   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 193 
 194   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 195   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 196   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 197   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 198 
 199   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 200   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 201   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 202   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
 204   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 205   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 206   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 207   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 208 
 209   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 210   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 211   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 212   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 213 
 214   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 215   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 216   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 217   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 218 
 219   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 220   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 221   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 222   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 223 
 224   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 225   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 226   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 227   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 228 
 229   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 230   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 231   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 232   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 233 
 234   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 235   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 236   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 237   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 238 
 239   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 240   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 241   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 242   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
 244   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 245   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 246   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 247   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 248 
 249   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 250   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 251   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 252   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 253 
 254   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 255   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 256   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 257   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 258 
 259   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 260   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 261   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 262   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 263 
 264   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 265   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 266   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 267   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 268 
 269   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 270   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 271   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 272   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 273 
 274   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 275   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 276   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 277   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 278 
 279   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 280   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 281   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 282   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 283 
 284   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 285   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 286   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 287   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 288 
 289   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 290   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 291   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 292   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 293 
 294   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 295   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 296   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 297   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 298 
 299   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 300   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 301   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 302   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 303 
 304   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 305   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 306   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 307   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 308 
 309   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 310   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 311   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 312   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 313 
 314   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 315   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 316   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 317   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 318 
 319   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 320   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 321   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 322   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
 328 // the AArch64 CSPR status flag register is not directly acessible as
 329 // instruction operand. the FPSR status flag register is a system
 330 // register which can be written/read using MSR/MRS but again does not
 331 // appear as an operand (a code identifying the FSPR occurs as an
 332 // immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
 345 alloc_class chunk0(
 346     // volatiles
 347     R10, R10_H,
 348     R11, R11_H,
 349     R12, R12_H,
 350     R13, R13_H,
 351     R14, R14_H,
 352     R15, R15_H,
 353     R16, R16_H,
 354     R17, R17_H,
 355     R18, R18_H,
 356 
 357     // arg registers
 358     R0, R0_H,
 359     R1, R1_H,
 360     R2, R2_H,
 361     R3, R3_H,
 362     R4, R4_H,
 363     R5, R5_H,
 364     R6, R6_H,
 365     R7, R7_H,
 366 
 367     // non-volatiles
 368     R19, R19_H,
 369     R20, R20_H,
 370     R21, R21_H,
 371     R22, R22_H,
 372     R23, R23_H,
 373     R24, R24_H,
 374     R25, R25_H,
 375     R26, R26_H,
 376 
 377     // non-allocatable registers
 378 
 379     R27, R27_H, // heapbase
 380     R28, R28_H, // thread
 381     R29, R29_H, // fp
 382     R30, R30_H, // lr
 383     R31, R31_H, // sp
 384 );
 385 
 386 alloc_class chunk1(
 387 
 388     // no save
 389     V16, V16_H, V16_J, V16_K,
 390     V17, V17_H, V17_J, V17_K,
 391     V18, V18_H, V18_J, V18_K,
 392     V19, V19_H, V19_J, V19_K,
 393     V20, V20_H, V20_J, V20_K,
 394     V21, V21_H, V21_J, V21_K,
 395     V22, V22_H, V22_J, V22_K,
 396     V23, V23_H, V23_J, V23_K,
 397     V24, V24_H, V24_J, V24_K,
 398     V25, V25_H, V25_J, V25_K,
 399     V26, V26_H, V26_J, V26_K,
 400     V27, V27_H, V27_J, V27_K,
 401     V28, V28_H, V28_J, V28_K,
 402     V29, V29_H, V29_J, V29_K,
 403     V30, V30_H, V30_J, V30_K,
 404     V31, V31_H, V31_J, V31_K,
 405 
 406     // arg registers
 407     V0, V0_H, V0_J, V0_K,
 408     V1, V1_H, V1_J, V1_K,
 409     V2, V2_H, V2_J, V2_K,
 410     V3, V3_H, V3_J, V3_K,
 411     V4, V4_H, V4_J, V4_K,
 412     V5, V5_H, V5_J, V5_K,
 413     V6, V6_H, V6_J, V6_K,
 414     V7, V7_H, V7_J, V7_K,
 415 
 416     // non-volatiles
 417     V8, V8_H, V8_J, V8_K,
 418     V9, V9_H, V9_J, V9_K,
 419     V10, V10_H, V10_J, V10_K,
 420     V11, V11_H, V11_J, V11_K,
 421     V12, V12_H, V12_J, V12_K,
 422     V13, V13_H, V13_J, V13_K,
 423     V14, V14_H, V14_J, V14_K,
 424     V15, V15_H, V15_J, V15_K,
 425 );
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
  432 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  433 // 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
  434 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
  435 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
 438 // Class for all 32 bit integer registers -- excludes SP which will
 439 // never be used as an integer register
 440 reg_class any_reg32(
 441     R0,
 442     R1,
 443     R2,
 444     R3,
 445     R4,
 446     R5,
 447     R6,
 448     R7,
 449     R10,
 450     R11,
 451     R12,
 452     R13,
 453     R14,
 454     R15,
 455     R16,
 456     R17,
 457     R18,
 458     R19,
 459     R20,
 460     R21,
 461     R22,
 462     R23,
 463     R24,
 464     R25,
 465     R26,
 466     R27,
 467     R28,
 468     R29,
 469     R30
 470 );
 471 
 472 // Singleton class for R0 int register
 473 reg_class int_r0_reg(R0);
 474 
 475 // Singleton class for R2 int register
 476 reg_class int_r2_reg(R2);
 477 
 478 // Singleton class for R3 int register
 479 reg_class int_r3_reg(R3);
 480 
 481 // Singleton class for R4 int register
 482 reg_class int_r4_reg(R4);
 483 
 484 // Class for all long integer registers (including RSP)
 485 reg_class any_reg(
 486     R0, R0_H,
 487     R1, R1_H,
 488     R2, R2_H,
 489     R3, R3_H,
 490     R4, R4_H,
 491     R5, R5_H,
 492     R6, R6_H,
 493     R7, R7_H,
 494     R10, R10_H,
 495     R11, R11_H,
 496     R12, R12_H,
 497     R13, R13_H,
 498     R14, R14_H,
 499     R15, R15_H,
 500     R16, R16_H,
 501     R17, R17_H,
 502     R18, R18_H,
 503     R19, R19_H,
 504     R20, R20_H,
 505     R21, R21_H,
 506     R22, R22_H,
 507     R23, R23_H,
 508     R24, R24_H,
 509     R25, R25_H,
 510     R26, R26_H,
 511     R27, R27_H,
 512     R28, R28_H,
 513     R29, R29_H,
 514     R30, R30_H,
 515     R31, R31_H
 516 );
 517 
 518 // Class for all non-special integer registers
 519 reg_class no_special_reg32_no_fp(
 520     R0,
 521     R1,
 522     R2,
 523     R3,
 524     R4,
 525     R5,
 526     R6,
 527     R7,
 528     R10,
 529     R11,
 530     R12,                        // rmethod
 531     R13,
 532     R14,
 533     R15,
 534     R16,
 535     R17,
 536     R18,
 537     R19,
 538     R20,
 539     R21,
 540     R22,
 541     R23,
 542     R24,
 543     R25,
 544     R26
 545  /* R27, */                     // heapbase
 546  /* R28, */                     // thread
 547  /* R29, */                     // fp
 548  /* R30, */                     // lr
 549  /* R31 */                      // sp
 550 );
 551 
 552 reg_class no_special_reg32_with_fp(
 553     R0,
 554     R1,
 555     R2,
 556     R3,
 557     R4,
 558     R5,
 559     R6,
 560     R7,
 561     R10,
 562     R11,
 563     R12,                        // rmethod
 564     R13,
 565     R14,
 566     R15,
 567     R16,
 568     R17,
 569     R18,
 570     R19,
 571     R20,
 572     R21,
 573     R22,
 574     R23,
 575     R24,
 576     R25,
 577     R26
 578  /* R27, */                     // heapbase
 579  /* R28, */                     // thread
 580  /* R29, */                     // fp
 581  /* R30, */                     // lr
 582  /* R31 */                      // sp
 583 );
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
 587 // Class for all non-special long integer registers
 588 reg_class no_special_reg_no_fp(
 589     R0, R0_H,
 590     R1, R1_H,
 591     R2, R2_H,
 592     R3, R3_H,
 593     R4, R4_H,
 594     R5, R5_H,
 595     R6, R6_H,
 596     R7, R7_H,
 597     R10, R10_H,
 598     R11, R11_H,
 599     R12, R12_H,                 // rmethod
 600     R13, R13_H,
 601     R14, R14_H,
 602     R15, R15_H,
 603     R16, R16_H,
 604     R17, R17_H,
 605     R18, R18_H,
 606     R19, R19_H,
 607     R20, R20_H,
 608     R21, R21_H,
 609     R22, R22_H,
 610     R23, R23_H,
 611     R24, R24_H,
 612     R25, R25_H,
 613     R26, R26_H,
 614  /* R27, R27_H, */              // heapbase
 615  /* R28, R28_H, */              // thread
 616  /* R29, R29_H, */              // fp
 617  /* R30, R30_H, */              // lr
 618  /* R31, R31_H */               // sp
 619 );
 620 
 621 reg_class no_special_reg_with_fp(
 622     R0, R0_H,
 623     R1, R1_H,
 624     R2, R2_H,
 625     R3, R3_H,
 626     R4, R4_H,
 627     R5, R5_H,
 628     R6, R6_H,
 629     R7, R7_H,
 630     R10, R10_H,
 631     R11, R11_H,
 632     R12, R12_H,                 // rmethod
 633     R13, R13_H,
 634     R14, R14_H,
 635     R15, R15_H,
 636     R16, R16_H,
 637     R17, R17_H,
 638     R18, R18_H,
 639     R19, R19_H,
 640     R20, R20_H,
 641     R21, R21_H,
 642     R22, R22_H,
 643     R23, R23_H,
 644     R24, R24_H,
 645     R25, R25_H,
 646     R26, R26_H,
 647  /* R27, R27_H, */              // heapbase
 648  /* R28, R28_H, */              // thread
 649  /* R29, R29_H, */              // fp
 650  /* R30, R30_H, */              // lr
 651  /* R31, R31_H */               // sp
 652 );
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
 656 // Class for 64 bit register r0
 657 reg_class r0_reg(
 658     R0, R0_H
 659 );
 660 
 661 // Class for 64 bit register r1
 662 reg_class r1_reg(
 663     R1, R1_H
 664 );
 665 
 666 // Class for 64 bit register r2
 667 reg_class r2_reg(
 668     R2, R2_H
 669 );
 670 
 671 // Class for 64 bit register r3
 672 reg_class r3_reg(
 673     R3, R3_H
 674 );
 675 
 676 // Class for 64 bit register r4
 677 reg_class r4_reg(
 678     R4, R4_H
 679 );
 680 
 681 // Class for 64 bit register r5
 682 reg_class r5_reg(
 683     R5, R5_H
 684 );
 685 
 686 // Class for 64 bit register r10
 687 reg_class r10_reg(
 688     R10, R10_H
 689 );
 690 
 691 // Class for 64 bit register r11
 692 reg_class r11_reg(
 693     R11, R11_H
 694 );
 695 
 696 // Class for method register
 697 reg_class method_reg(
 698     R12, R12_H
 699 );
 700 
 701 // Class for heapbase register
 702 reg_class heapbase_reg(
 703     R27, R27_H
 704 );
 705 
 706 // Class for thread register
 707 reg_class thread_reg(
 708     R28, R28_H
 709 );
 710 
 711 // Class for frame pointer register
 712 reg_class fp_reg(
 713     R29, R29_H
 714 );
 715 
 716 // Class for link register
 717 reg_class lr_reg(
 718     R30, R30_H
 719 );
 720 
 721 // Class for long sp register
 722 reg_class sp_reg(
 723   R31, R31_H
 724 );
 725 
 726 // Class for all pointer registers
 727 reg_class ptr_reg(
 728     R0, R0_H,
 729     R1, R1_H,
 730     R2, R2_H,
 731     R3, R3_H,
 732     R4, R4_H,
 733     R5, R5_H,
 734     R6, R6_H,
 735     R7, R7_H,
 736     R10, R10_H,
 737     R11, R11_H,
 738     R12, R12_H,
 739     R13, R13_H,
 740     R14, R14_H,
 741     R15, R15_H,
 742     R16, R16_H,
 743     R17, R17_H,
 744     R18, R18_H,
 745     R19, R19_H,
 746     R20, R20_H,
 747     R21, R21_H,
 748     R22, R22_H,
 749     R23, R23_H,
 750     R24, R24_H,
 751     R25, R25_H,
 752     R26, R26_H,
 753     R27, R27_H,
 754     R28, R28_H,
 755     R29, R29_H,
 756     R30, R30_H,
 757     R31, R31_H
 758 );
 759 
 760 // Class for all non_special pointer registers
 761 reg_class no_special_ptr_reg(
 762     R0, R0_H,
 763     R1, R1_H,
 764     R2, R2_H,
 765     R3, R3_H,
 766     R4, R4_H,
 767     R5, R5_H,
 768     R6, R6_H,
 769     R7, R7_H,
 770     R10, R10_H,
 771     R11, R11_H,
 772     R12, R12_H,
 773     R13, R13_H,
 774     R14, R14_H,
 775     R15, R15_H,
 776     R16, R16_H,
 777     R17, R17_H,
 778     R18, R18_H,
 779     R19, R19_H,
 780     R20, R20_H,
 781     R21, R21_H,
 782     R22, R22_H,
 783     R23, R23_H,
 784     R24, R24_H,
 785     R25, R25_H,
 786     R26, R26_H,
 787  /* R27, R27_H, */              // heapbase
 788  /* R28, R28_H, */              // thread
 789  /* R29, R29_H, */              // fp
 790  /* R30, R30_H, */              // lr
 791  /* R31, R31_H */               // sp
 792 );
 793 
 794 // Class for all float registers
 795 reg_class float_reg(
 796     V0,
 797     V1,
 798     V2,
 799     V3,
 800     V4,
 801     V5,
 802     V6,
 803     V7,
 804     V8,
 805     V9,
 806     V10,
 807     V11,
 808     V12,
 809     V13,
 810     V14,
 811     V15,
 812     V16,
 813     V17,
 814     V18,
 815     V19,
 816     V20,
 817     V21,
 818     V22,
 819     V23,
 820     V24,
 821     V25,
 822     V26,
 823     V27,
 824     V28,
 825     V29,
 826     V30,
 827     V31
 828 );
 829 
 830 // Double precision float registers have virtual `high halves' that
 831 // are needed by the allocator.
 832 // Class for all double registers
 833 reg_class double_reg(
 834     V0, V0_H,
 835     V1, V1_H,
 836     V2, V2_H,
 837     V3, V3_H,
 838     V4, V4_H,
 839     V5, V5_H,
 840     V6, V6_H,
 841     V7, V7_H,
 842     V8, V8_H,
 843     V9, V9_H,
 844     V10, V10_H,
 845     V11, V11_H,
 846     V12, V12_H,
 847     V13, V13_H,
 848     V14, V14_H,
 849     V15, V15_H,
 850     V16, V16_H,
 851     V17, V17_H,
 852     V18, V18_H,
 853     V19, V19_H,
 854     V20, V20_H,
 855     V21, V21_H,
 856     V22, V22_H,
 857     V23, V23_H,
 858     V24, V24_H,
 859     V25, V25_H,
 860     V26, V26_H,
 861     V27, V27_H,
 862     V28, V28_H,
 863     V29, V29_H,
 864     V30, V30_H,
 865     V31, V31_H
 866 );
 867 
// Class for all 64bit vector registers
// (same slot layout as double_reg: a 64-bit vector uses Vn plus its
// virtual Vn_H half)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (a 128-bit vector uses four 32-bit slots: Vn plus the virtual _H,
// _J and _K quarters)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slots are named, unlike vectorx_reg
// which also names _J/_K quarters — confirm against the operands that
// reference this class.
reg_class v0_reg(
    V0, V0_H
);
 944 
// Class for 128 bit register v1
// NOTE(review): only the V1/V1_H slots are named — see note on v0_reg.
reg_class v1_reg(
    V1, V1_H
);
 949 
// Class for 128 bit register v2
// NOTE(review): only the V2/V2_H slots are named — see note on v0_reg.
reg_class v2_reg(
    V2, V2_H
);
 954 
// Class for 128 bit register v3
// NOTE(review): only the V3/V3_H slots are named — see note on v0_reg.
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the NZCV flags register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are ranked as twice the cost of a plain insn.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile memory references are the most expensive operations.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
// Platform hooks describing call trampoline stubs. AArch64 reports
// zero sizes here, i.e. it does not emit call trampolines via this
// mechanism.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
// Platform hooks for the sizes and emission of the exception and
// deoptimization handler stubs.
class HandlerImpl {

 public:

  // Emitters are defined in the .cpp generated from this file.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // The exception handler is a single (possibly far) branch.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): returns 4 instruction slots although the comment
    // counts two instructions — presumably a far branch can expand to
    // multiple insns (cf. MacroAssembler::far_branch_size()); confirm.
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers used by the volatile put/get and CAS
  // matching predicates below (definitions in the source block)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // walk between the leading and trailing membars of a volatile
  // put/CAS signature, in either direction
  MemBarNode *leading_to_trailing(MemBarNode *leading);
  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1064 %}
1065 
1066 source %{
1067 
  // Optimization of volatile gets and puts
1069   // -------------------------------------
1070   //
1071   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1072   // use to implement volatile reads and writes. For a volatile read
1073   // we simply need
1074   //
1075   //   ldar<x>
1076   //
1077   // and for a volatile write we need
1078   //
1079   //   stlr<x>
1080   //
1081   // Alternatively, we can implement them by pairing a normal
1082   // load/store with a memory barrier. For a volatile read we need
1083   //
1084   //   ldr<x>
1085   //   dmb ishld
1086   //
1087   // for a volatile write
1088   //
1089   //   dmb ish
1090   //   str<x>
1091   //   dmb ish
1092   //
1093   // We can also use ldaxr and stlxr to implement compare and swap CAS
1094   // sequences. These are normally translated to an instruction
1095   // sequence like the following
1096   //
1097   //   dmb      ish
1098   // retry:
1099   //   ldxr<x>   rval raddr
1100   //   cmp       rval rold
1101   //   b.ne done
  //   stlxr<x>  rval, rnew, raddr
1103   //   cbnz      rval retry
1104   // done:
1105   //   cset      r0, eq
1106   //   dmb ishld
1107   //
1108   // Note that the exclusive store is already using an stlxr
1109   // instruction. That is required to ensure visibility to other
1110   // threads of the exclusive write (assuming it succeeds) before that
1111   // of any subsequent writes.
1112   //
1113   // The following instruction sequence is an improvement on the above
1114   //
1115   // retry:
1116   //   ldaxr<x>  rval raddr
1117   //   cmp       rval rold
1118   //   b.ne done
  //   stlxr<x>  rval, rnew, raddr
1120   //   cbnz      rval retry
1121   // done:
1122   //   cset      r0, eq
1123   //
1124   // We don't need the leading dmb ish since the stlxr guarantees
1125   // visibility of prior writes in the case that the swap is
1126   // successful. Crucially we don't have to worry about the case where
1127   // the swap is not successful since no valid program should be
1128   // relying on visibility of prior changes by the attempting thread
1129   // in the case where the CAS fails.
1130   //
1131   // Similarly, we don't need the trailing dmb ishld if we substitute
1132   // an ldaxr instruction since that will provide all the guarantees we
1133   // require regarding observation of changes made by other threads
1134   // before any change to the CAS address observed by the load.
1135   //
1136   // In order to generate the desired instruction sequence we need to
1137   // be able to identify specific 'signature' ideal graph node
1138   // sequences which i) occur as a translation of a volatile reads or
1139   // writes or CAS operations and ii) do not occur through any other
1140   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1142   // sequences to the desired machine code sequences. Selection of the
1143   // alternative rules can be implemented by predicates which identify
1144   // the relevant node sequences.
1145   //
1146   // The ideal graph generator translates a volatile read to the node
1147   // sequence
1148   //
1149   //   LoadX[mo_acquire]
1150   //   MemBarAcquire
1151   //
1152   // As a special case when using the compressed oops optimization we
1153   // may also see this variant
1154   //
1155   //   LoadN[mo_acquire]
1156   //   DecodeN
1157   //   MemBarAcquire
1158   //
1159   // A volatile write is translated to the node sequence
1160   //
1161   //   MemBarRelease
1162   //   StoreX[mo_release] {CardMark}-optional
1163   //   MemBarVolatile
1164   //
1165   // n.b. the above node patterns are generated with a strict
1166   // 'signature' configuration of input and output dependencies (see
1167   // the predicates below for exact details). The card mark may be as
1168   // simple as a few extra nodes or, in a few GC configurations, may
1169   // include more complex control flow between the leading and
1170   // trailing memory barriers. However, whatever the card mark
1171   // configuration these signatures are unique to translated volatile
1172   // reads/stores -- they will not appear as a result of any other
1173   // bytecode translation or inlining nor as a consequence of
1174   // optimizing transforms.
1175   //
1176   // We also want to catch inlined unsafe volatile gets and puts and
1177   // be able to implement them using either ldar<x>/stlr<x> or some
1178   // combination of ldr<x>/stlr<x> and dmb instructions.
1179   //
1180   // Inlined unsafe volatiles puts manifest as a minor variant of the
1181   // normal volatile put node sequence containing an extra cpuorder
1182   // membar
1183   //
1184   //   MemBarRelease
1185   //   MemBarCPUOrder
1186   //   StoreX[mo_release] {CardMark}-optional
1187   //   MemBarVolatile
1188   //
1189   // n.b. as an aside, the cpuorder membar is not itself subject to
1190   // matching and translation by adlc rules.  However, the rule
1191   // predicates need to detect its presence in order to correctly
1192   // select the desired adlc rules.
1193   //
1194   // Inlined unsafe volatile gets manifest as a somewhat different
1195   // node sequence to a normal volatile get
1196   //
1197   //   MemBarCPUOrder
1198   //        ||       \\
1199   //   MemBarAcquire LoadX[mo_acquire]
1200   //        ||
1201   //   MemBarCPUOrder
1202   //
1203   // In this case the acquire membar does not directly depend on the
1204   // load. However, we can be sure that the load is generated from an
1205   // inlined unsafe volatile get if we see it dependent on this unique
1206   // sequence of membar nodes. Similarly, given an acquire membar we
1207   // can know that it was added because of an inlined unsafe volatile
1208   // get if it is fed and feeds a cpuorder membar and if its feed
1209   // membar also feeds an acquiring load.
1210   //
1211   // Finally an inlined (Unsafe) CAS operation is translated to the
1212   // following ideal graph
1213   //
1214   //   MemBarRelease
1215   //   MemBarCPUOrder
1216   //   CompareAndSwapX {CardMark}-optional
1217   //   MemBarCPUOrder
1218   //   MemBarAcquire
1219   //
1220   // So, where we can identify these volatile read and write
1221   // signatures we can choose to plant either of the above two code
1222   // sequences. For a volatile read we can simply plant a normal
1223   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1224   // also choose to inhibit translation of the MemBarAcquire and
1225   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1226   //
1227   // When we recognise a volatile store signature we can choose to
1228   // plant at a dmb ish as a translation for the MemBarRelease, a
1229   // normal str<x> and then a dmb ish for the MemBarVolatile.
1230   // Alternatively, we can inhibit translation of the MemBarRelease
1231   // and MemBarVolatile and instead plant a simple stlr<x>
1232   // instruction.
1233   //
1234   // when we recognise a CAS signature we can choose to plant a dmb
1235   // ish as a translation for the MemBarRelease, the conventional
1236   // macro-instruction sequence for the CompareAndSwap node (which
1237   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1238   // Alternatively, we can elide generation of the dmb instructions
1239   // and plant the alternative CompareAndSwap macro-instruction
1240   // sequence (which uses ldaxr<x>).
1241   //
1242   // Of course, the above only applies when we see these signature
1243   // configurations. We still want to plant dmb instructions in any
1244   // other cases where we may see a MemBarAcquire, MemBarRelease or
1245   // MemBarVolatile. For example, at the end of a constructor which
1246   // writes final/volatile fields we will see a MemBarRelease
1247   // instruction and this needs a 'dmb ish' lest we risk the
1248   // constructed object being visible without making the
1249   // final/volatile field writes visible.
1250   //
1251   // n.b. the translation rules below which rely on detection of the
1252   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1253   // If we see anything other than the signature configurations we
1254   // always just translate the loads and stores to ldr<x> and str<x>
1255   // and translate acquire, release and volatile membars to the
1256   // relevant dmb instructions.
1257   //
1258 
1259   // graph traversal helpers used for volatile put/get and CAS
1260   // optimization
1261 
1262   // 1) general purpose helpers
1263 
1264   // if node n is linked to a parent MemBarNode by an intervening
1265   // Control and Memory ProjNode return the MemBarNode otherwise return
1266   // NULL.
1267   //
1268   // n may only be a Load or a MemBar.
1269 
1270   MemBarNode *parent_membar(const Node *n)
1271   {
1272     Node *ctl = NULL;
1273     Node *mem = NULL;
1274     Node *membar = NULL;
1275 
1276     if (n->is_Load()) {
1277       ctl = n->lookup(LoadNode::Control);
1278       mem = n->lookup(LoadNode::Memory);
1279     } else if (n->is_MemBar()) {
1280       ctl = n->lookup(TypeFunc::Control);
1281       mem = n->lookup(TypeFunc::Memory);
1282     } else {
1283         return NULL;
1284     }
1285 
1286     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1287       return NULL;
1288     }
1289 
1290     membar = ctl->lookup(0);
1291 
1292     if (!membar || !membar->is_MemBar()) {
1293       return NULL;
1294     }
1295 
1296     if (mem->lookup(0) != membar) {
1297       return NULL;
1298     }
1299 
1300     return membar->as_MemBar();
1301   }
1302 
1303   // if n is linked to a child MemBarNode by intervening Control and
1304   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1305 
1306   MemBarNode *child_membar(const MemBarNode *n)
1307   {
1308     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1309     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1310 
1311     // MemBar needs to have both a Ctl and Mem projection
1312     if (! ctl || ! mem)
1313       return NULL;
1314 
1315     MemBarNode *child = NULL;
1316     Node *x;
1317 
1318     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1319       x = ctl->fast_out(i);
1320       // if we see a membar we keep hold of it. we may also see a new
1321       // arena copy of the original but it will appear later
1322       if (x->is_MemBar()) {
1323           child = x->as_MemBar();
1324           break;
1325       }
1326     }
1327 
1328     if (child == NULL) {
1329       return NULL;
1330     }
1331 
1332     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1333       x = mem->fast_out(i);
1334       // if we see a membar we keep hold of it. we may also see a new
1335       // arena copy of the original but it will appear later
1336       if (x == child) {
1337         return child;
1338       }
1339     }
1340     return NULL;
1341   }
1342 
1343   // helper predicate use to filter candidates for a leading memory
1344   // barrier
1345   //
1346   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1347   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1348 
1349   bool leading_membar(const MemBarNode *barrier)
1350   {
1351     int opcode = barrier->Opcode();
1352     // if this is a release membar we are ok
1353     if (opcode == Op_MemBarRelease) {
1354       return true;
1355     }
1356     // if its a cpuorder membar . . .
1357     if (opcode != Op_MemBarCPUOrder) {
1358       return false;
1359     }
1360     // then the parent has to be a release membar
1361     MemBarNode *parent = parent_membar(barrier);
1362     if (!parent) {
1363       return false;
1364     }
1365     opcode = parent->Opcode();
1366     return opcode == Op_MemBarRelease;
1367   }
1368 
1369   // 2) card mark detection helper
1370 
1371   // helper predicate which can be used to detect a volatile membar
1372   // introduced as part of a conditional card mark sequence either by
1373   // G1 or by CMS when UseCondCardMark is true.
1374   //
1375   // membar can be definitively determined to be part of a card mark
1376   // sequence if and only if all the following hold
1377   //
1378   // i) it is a MemBarVolatile
1379   //
1380   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1381   // true
1382   //
1383   // iii) the node's Mem projection feeds a StoreCM node.
1384 
1385   bool is_card_mark_membar(const MemBarNode *barrier)
1386   {
1387     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1388       return false;
1389     }
1390 
1391     if (barrier->Opcode() != Op_MemBarVolatile) {
1392       return false;
1393     }
1394 
1395     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1396 
1397     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1398       Node *y = mem->fast_out(i);
1399       if (y->Opcode() == Op_StoreCM) {
1400         return true;
1401       }
1402     }
1403 
1404     return false;
1405   }
1406 
1407 
1408   // 3) helper predicates to traverse volatile put or CAS graphs which
1409   // may contain GC barrier subgraphs
1410 
1411   // Preamble
1412   // --------
1413   //
1414   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1416   // leading MemBarRelease and a trailing MemBarVolatile as follows
1417   //
1418   //   MemBarRelease
1419   //  {    ||        } -- optional
1420   //  {MemBarCPUOrder}
1421   //       ||       \\
1422   //       ||     StoreX[mo_release]
1423   //       | \ Bot    / ???
1424   //       | MergeMem
1425   //       | /
1426   //   MemBarVolatile
1427   //
1428   // where
1429   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1430   //  | \ and / indicate further routing of the Ctl and Mem feeds
1431   //
1432   // Note that the memory feed from the CPUOrder membar to the
1433   // MergeMem node is an AliasIdxBot slice while the feed from the
1434   // StoreX is for a slice determined by the type of value being
1435   // written.
1436   //
1437   // the diagram above shows the graph we see for non-object stores.
1438   // for a volatile Object store (StoreN/P) we may see other nodes
1439   // below the leading membar because of the need for a GC pre- or
1440   // post-write barrier.
1441   //
  // with most GC configurations we will see this simple variant which
1443   // includes a post-write barrier card mark.
1444   //
1445   //   MemBarRelease______________________________
1446   //         ||    \\               Ctl \        \\
1447   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1448   //         | \ Bot  / oop                 . . .  /
1449   //         | MergeMem
1450   //         | /
1451   //         ||      /
1452   //   MemBarVolatile
1453   //
1454   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1455   // the object address to an int used to compute the card offset) and
1456   // Ctl+Mem to a StoreB node (which does the actual card mark).
1457   //
1458   // n.b. a StoreCM node is only ever used when CMS (with or without
1459   // CondCardMark) or G1 is configured. This abstract instruction
1460   // differs from a normal card mark write (StoreB) because it implies
1461   // a requirement to order visibility of the card mark (StoreCM)
1462   // after that of the object put (StoreP/N) using a StoreStore memory
1463   // barrier. Note that this is /not/ a requirement to order the
1464   // instructions in the generated code (that is already guaranteed by
1465   // the order of memory dependencies). Rather it is a requirement to
1466   // ensure visibility order which only applies on architectures like
1467   // AArch64 which do not implement TSO. This ordering is required for
1468   // both non-volatile and volatile puts.
1469   //
1470   // That implies that we need to translate a StoreCM using the
1471   // sequence
1472   //
1473   //   dmb ishst
1474   //   stlrb
1475   //
1476   // This dmb cannot be omitted even when the associated StoreX or
1477   // CompareAndSwapX is implemented using stlr. However, as described
1478   // below there are circumstances where a specific GC configuration
1479   // requires a stronger barrier in which case it can be omitted.
1480   // 
1481   // With the Serial or Parallel GC using +CondCardMark the card mark
1482   // is performed conditionally on it currently being unmarked in
1483   // which case the volatile put graph looks slightly different
1484   //
1485   //   MemBarRelease____________________________________________
1486   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1487   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1488   //         | \ Bot / oop                          \            |
1489   //         | MergeMem                            . . .      StoreB
1490   //         | /                                                /
1491   //         ||     /
1492   //   MemBarVolatile
1493   //
1494   // It is worth noting at this stage that all the above
1495   // configurations can be uniquely identified by checking that the
1496   // memory flow includes the following subgraph:
1497   //
1498   //   MemBarRelease
1499   //  {MemBarCPUOrder}
1500   //      |  \      . . .
1501   //      |  StoreX[mo_release]  . . .
1502   //  Bot |   / oop
1503   //     MergeMem
1504   //      |
1505   //   MemBarVolatile
1506   //
1507   // This is referred to as a *normal* volatile store subgraph. It can
1508   // easily be detected starting from any candidate MemBarRelease,
1509   // StoreX[mo_release] or MemBarVolatile node.
1510   //
1511   // A small variation on this normal case occurs for an unsafe CAS
1512   // operation. The basic memory flow subgraph for a non-object CAS is
1513   // as follows
1514   //
1515   //   MemBarRelease
1516   //         ||
1517   //   MemBarCPUOrder
1518   //          |     \\   . . .
1519   //          |     CompareAndSwapX
1520   //          |       |
1521   //      Bot |     SCMemProj
1522   //           \     / Bot
1523   //           MergeMem
1524   //           /
1525   //   MemBarCPUOrder
1526   //         ||
1527   //   MemBarAcquire
1528   //
1529   // The same basic variations on this arrangement (mutatis mutandis)
1530   // occur when a card mark is introduced. i.e. the CPUOrder MemBar
1531   // feeds the extra CastP2X, LoadB etc nodes but the above memory
1532   // flow subgraph is still present.
1533   // 
1534   // This is referred to as a *normal* CAS subgraph. It can easily be
1535   // detected starting from any candidate MemBarRelease,
1536   // StoreX[mo_release] or MemBarAcquire node.
1537   //
1538   // The code below uses two helper predicates, leading_to_trailing
1539   // and trailing_to_leading to identify these normal graphs, one
1540   // validating the layout starting from the top membar and searching
1541   // down and the other validating the layout starting from the lower
1542   // membar and searching up.
1543   //
1544   // There are two special case GC configurations when the simple
1545   // normal graphs above may not be generated: when using G1 (which
1546   // always employs a conditional card mark); and when using CMS with
1547   // conditional card marking (+CondCardMark) configured. These GCs
1548   // are both concurrent rather than stop-the world GCs. So they
1549   // introduce extra Ctl+Mem flow into the graph between the leading
1550   // and trailing membar nodes, in particular enforcing stronger
  // memory serialisation between the object put and the corresponding
1552   // conditional card mark. CMS employs a post-write GC barrier while
1553   // G1 employs both a pre- and post-write GC barrier.
1554   //
1555   // The post-write barrier subgraph for these configurations includes
1556   // a MemBarVolatile node -- referred to as a card mark membar --
1557   // which is needed to order the card write (StoreCM) operation in
1558   // the barrier, the preceding StoreX (or CompareAndSwapX) and Store
1559   // operations performed by GC threads i.e. a card mark membar
1560   // constitutes a StoreLoad barrier hence must be translated to a dmb
1561   // ish (whether or not it sits inside a volatile store sequence).
1562   //
1563   // Of course, the use of the dmb ish for the card mark membar also
  // implies that the StoreCM which follows can omit the dmb ishst
1565   // instruction. The necessary visibility ordering will already be
1566   // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
1567   // needs to be generated for as part of the StoreCM sequence with GC
1568   // configuration +CMS -CondCardMark.
1569   // 
1570   // Of course all these extra barrier nodes may well be absent --
1571   // they are only inserted for object puts. Their potential presence
1572   // significantly complicates the task of identifying whether a
1573   // MemBarRelease, StoreX[mo_release], MemBarVolatile or
1574   // MemBarAcquire forms part of a volatile put or CAS when using
1575   // these GC configurations (see below) and also complicates the
1576   // decision as to how to translate a MemBarVolatile and StoreCM.
1577   //
  // So, this means that a card mark MemBarVolatile occurring in the
  // post-barrier graph needs to be distinguished from a normal
1580   // trailing MemBarVolatile. Resolving this is straightforward: a
1581   // card mark MemBarVolatile always projects a Mem feed to a StoreCM
1582   // node and that is a unique marker
1583   //
1584   //      MemBarVolatile (card mark)
1585   //       C |    \     . . .
1586   //         |   StoreCM   . . .
1587   //       . . .
1588   //
1589   // Returning to the task of translating the object put and the
1590   // leading/trailing membar nodes: what do the node graphs look like
1591   // for these 2 special cases? and how can we determine the status of
1592   // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
1593   // normal and non-normal cases?
1594   //
1595   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1597   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1598   // intervening StoreLoad barrier (MemBarVolatile).
1599   //
1600   // So, with CMS we may see a node graph for a volatile object store
1601   // which looks like this
1602   //
1603   //   MemBarRelease
1604   //   MemBarCPUOrder_(leading)____________________
1605   //     C |  | M \       \\               M |   C \
1606   //       |  |    \    StoreN/P[mo_release] |  CastP2X
1607   //       |  | Bot \    / oop      \        |
1608   //       |  |    MergeMem          \      / 
1609   //       |  |      /                |    /
1610   //     MemBarVolatile (card mark)   |   /
1611   //     C |  ||    M |               |  /
1612   //       | LoadB    | Bot       oop | / Bot
1613   //       |   |      |              / /
1614   //       | Cmp      |\            / /
1615   //       | /        | \          / /
1616   //       If         |  \        / /
1617   //       | \        |   \      / /
1618   // IfFalse  IfTrue  |    \    / /
1619   //       \     / \  |    |   / /
1620   //        \   / StoreCM  |  / /
1621   //         \ /      \   /  / /
1622   //        Region     Phi  / /
1623   //          | \   Raw |  / /
1624   //          |  . . .  | / /
1625   //          |       MergeMem
1626   //          |           |
1627   //        MemBarVolatile (trailing)
1628   //
1629   // Notice that there are two MergeMem nodes below the leading
1630   // membar. The first MergeMem merges the AliasIdxBot Mem slice from
1631   // the leading membar and the oopptr Mem slice from the Store into
1632   // the card mark membar. The trailing MergeMem merges the
1633   // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
1634   // slice from the StoreCM and an oop slice from the StoreN/P node
1635   // into the trailing membar (n.b. the raw slice proceeds via a Phi
1636   // associated with the If region).
1637   //
1638   // So, in the case of CMS + CondCardMark the volatile object store
1639   // graph still includes a normal volatile store subgraph from the
1640   // leading membar to the trailing membar. However, it also contains
1641   // the same shape memory flow to the card mark membar. The two flows
1642   // can be distinguished by testing whether or not the downstream
1643   // membar is a card mark membar.
1644   //
1645   // The graph for a CAS also varies with CMS + CondCardMark, in
1646   // particular employing a control feed from the CompareAndSwapX node
1647   // through a CmpI and If to the card mark membar and StoreCM which
1648   // updates the associated card. This avoids executing the card mark
1649   // if the CAS fails. However, it can be seen from the diagram below
1650   // that the presence of the barrier does not alter the normal CAS
1651   // memory subgraph where the leading membar feeds a CompareAndSwapX,
1652   // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
1653   // MemBarAcquire pair.
1654   //
1655   //   MemBarRelease
1656   //   MemBarCPUOrder__(leading)_______________________
1657   //   C /  M |                        \\            C \
1658   //  . . .   | Bot                CompareAndSwapN/P   CastP2X
1659   //          |                  C /  M |
1660   //          |                 CmpI    |
1661   //          |                  /      |
1662   //          |               . . .     |
1663   //          |              IfTrue     |
1664   //          |              /          |
1665   //       MemBarVolatile (card mark)   |
1666   //        C |  ||    M |              |
1667   //          | LoadB    | Bot   ______/|
1668   //          |   |      |      /       |
1669   //          | Cmp      |     /      SCMemProj
1670   //          | /        |    /         |
1671   //          If         |   /         /
1672   //          | \        |  /         / Bot
1673   //     IfFalse  IfTrue | /         /
1674   //          |   / \   / / prec    /
1675   //   . . .  |  /  StoreCM        /
1676   //        \ | /      | raw      /
1677   //        Region    . . .      /
1678   //           | \              /
1679   //           |   . . .   \    / Bot
1680   //           |        MergeMem
1681   //           |          /
1682   //         MemBarCPUOrder
1683   //         MemBarAcquire (trailing)
1684   //
1685   // This has a slightly different memory subgraph to the one seen
1686   // previously but the core of it has a similar memory flow to the
1687   // CAS normal subgraph:
1688   //
1689   //   MemBarRelease
1690   //   MemBarCPUOrder____
1691   //         |          \      . . .
1692   //         |       CompareAndSwapX  . . .
1693   //         |       C /  M |
1694   //         |      CmpI    |
1695   //         |       /      |
1696   //         |      . .    /
1697   //     Bot |   IfTrue   /
1698   //         |   /       /
1699   //    MemBarVolatile  /
1700   //         | ...     /
1701   //      StoreCM ... /
1702   //         |       / 
1703   //       . . .  SCMemProj
1704   //      Raw \    / Bot
1705   //        MergeMem
1706   //           |
1707   //   MemBarCPUOrder
1708   //   MemBarAcquire
1709   //
1710   // The G1 graph for a volatile object put is a lot more complicated.
1711   // Nodes inserted on behalf of G1 may comprise: a pre-write graph
1712   // which adds the old value to the SATB queue; the releasing store
1713   // itself; and, finally, a post-write graph which performs a card
1714   // mark.
1715   //
1716   // The pre-write graph may be omitted, but only when the put is
1717   // writing to a newly allocated (young gen) object and then only if
1718   // there is a direct memory chain to the Initialize node for the
1719   // object allocation. This will not happen for a volatile put since
1720   // any memory chain passes through the leading membar.
1721   //
1722   // The pre-write graph includes a series of 3 If tests. The outermost
1723   // If tests whether SATB is enabled (no else case). The next If tests
1724   // whether the old value is non-NULL (no else case). The third tests
1725   // whether the SATB queue index is > 0, if so updating the queue. The
1726   // else case for this third If calls out to the runtime to allocate a
1727   // new queue buffer.
1728   //
1729   // So with G1 the pre-write and releasing store subgraph looks like
1730   // this (the nested Ifs are omitted).
1731   //
1732   //  MemBarRelease (leading)____________
1733   //     C |  ||  M \   M \    M \  M \ . . .
1734   //       | LoadB   \  LoadL  LoadN   \
1735   //       | /        \                 \
1736   //       If         |\                 \
1737   //       | \        | \                 \
1738   //  IfFalse  IfTrue |  \                 \
1739   //       |     |    |   \                 |
1740   //       |     If   |   /\                |
1741   //       |     |          \               |
1742   //       |                 \              |
1743   //       |    . . .         \             |
1744   //       | /       | /       |            |
1745   //      Region  Phi[M]       |            |
1746   //       | \       |         |            |
1747   //       |  \_____ | ___     |            |
1748   //     C | C \     |   C \ M |            |
1749   //       | CastP2X | StoreN/P[mo_release] |
1750   //       |         |         |            |
1751   //     C |       M |       M |          M |
1752   //        \        | Raw     | oop       / Bot
1753   //                  . . .
1754   //          (post write subtree elided)
1755   //                    . . .
1756   //             C \         M /
1757   //         MemBarVolatile (trailing)
1758   //
1759   // Note that the three memory feeds into the post-write tree are an
1760   // AliasRawIdx slice associated with the writes in the pre-write
1761   // tree, an oop type slice from the StoreX specific to the type of
1762   // the volatile field and the AliasBotIdx slice emanating from the
1763   // leading membar.
1764   //
1765   // n.b. the LoadB in this subgraph is not the card read -- it's a
1766   // read of the SATB queue active flag.
1767   //
1768   // The CAS graph is once again a variant of the above with a
1769   // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
1770   // value from the CompareAndSwapX node is fed into the post-write
  // graph along with the AliasIdxRaw feed from the pre-barrier and
  // the AliasIdxBot feeds from the leading membar and the SCMemProj.
1773   //
1774   //  MemBarRelease (leading)____________
1775   //     C |  ||  M \   M \    M \  M \ . . .
1776   //       | LoadB   \  LoadL  LoadN   \
1777   //       | /        \                 \
1778   //       If         |\                 \
1779   //       | \        | \                 \
1780   //  IfFalse  IfTrue |  \                 \
1781   //       |     |    |   \                 \
1782   //       |     If   |    \                 |
1783   //       |     |          \                |
1784   //       |                 \               |
1785   //       |    . . .         \              |
1786   //       | /       | /       \             |
1787   //      Region  Phi[M]        \            |
1788   //       | \       |           \           |
1789   //       |  \_____ |            |          |
1790   //     C | C \     |            |          |
1791   //       | CastP2X |     CompareAndSwapX   |
1792   //       |         |   res |     |         |
1793   //     C |       M |       |  SCMemProj  M |
1794   //        \        | Raw   |     | Bot    / Bot
1795   //                  . . .
1796   //          (post write subtree elided)
1797   //                    . . .
1798   //             C \         M /
1799   //         MemBarVolatile (trailing)
1800   //
1801   // The G1 post-write subtree is also optional, this time when the
1802   // new value being written is either null or can be identified as a
1803   // newly allocated (young gen) object with no intervening control
1804   // flow. The latter cannot happen but the former may, in which case
1805   // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged directly into the
1807   // trailing membar as per the normal subgraph. So, the only special
1808   // case which arises is when the post-write subgraph is generated.
1809   //
1810   // The kernel of the post-write G1 subgraph is the card mark itself
1811   // which includes a card mark memory barrier (MemBarVolatile), a
1812   // card test (LoadB), and a conditional update (If feeding a
1813   // StoreCM). These nodes are surrounded by a series of nested Ifs
1814   // which try to avoid doing the card mark. The top level If skips if
1815   // the object reference does not cross regions (i.e. it tests if
1816   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1817   // need not be recorded. The next If, which skips on a NULL value,
1818   // may be absent (it is not generated if the type of value is >=
1819   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1820   // checking if card_val != young).  n.b. although this test requires
1821   // a pre-read of the card it can safely be done before the StoreLoad
1822   // barrier. However that does not bypass the need to reread the card
1823   // after the barrier.
1824   //
1825   //                (pre-write subtree elided)
1826   //        . . .                  . . .    . . .  . . .
1827   //        C |               M |    M |    M |
1828   //       Region            Phi[M] StoreN    |
1829   //          |            Raw  |  oop |  Bot |
1830   //         / \_______         |\     |\     |\
1831   //      C / C \      . . .    | \    | \    | \
1832   //       If   CastP2X . . .   |  \   |  \   |  \
1833   //       / \                  |   \  |   \  |   \
1834   //      /   \                 |    \ |    \ |    \
1835   // IfFalse IfTrue             |      |      |     \
1836   //   |       |                 \     |     /       |
1837   //   |       If                 \    | \  /   \    |
1838   //   |      / \                  \   |   /     \   |
1839   //   |     /   \                  \  |  / \     |  |
1840   //   | IfFalse IfTrue           MergeMem   \    |  |
1841   //   |  . . .    / \                 |      \   |  |
1842   //   |          /   \                |       |  |  |
1843   //   |     IfFalse IfTrue            |       |  |  |
1844   //   |      . . .    |               |       |  |  |
1845   //   |               If             /        |  |  |
1846   //   |               / \           /         |  |  |
1847   //   |              /   \         /          |  |  |
1848   //   |         IfFalse IfTrue    /           |  |  |
1849   //   |           . . .   |      /            |  |  |
1850   //   |                    \    /             |  |  |
1851   //   |                     \  /              |  |  |
1852   //   |         MemBarVolatile__(card mark  ) |  |  |
1853   //   |              ||   C |     \           |  |  |
1854   //   |             LoadB   If     |         /   |  |
1855   //   |                    / \ Raw |        /   /  /
1856   //   |                   . . .    |       /   /  /
1857   //   |                        \   |      /   /  /
1858   //   |                        StoreCM   /   /  /
1859   //   |                           |     /   /  /
1860   //   |                            . . .   /  /
1861   //   |                                   /  /
1862   //   |   . . .                          /  /
1863   //   |    |             | /            /  /
1864   //   |    |           Phi[M] /        /  /
1865   //   |    |             |   /        /  /
1866   //   |    |             |  /        /  /
1867   //   |  Region  . . .  Phi[M]      /  /
1868   //   |    |             |         /  /
1869   //    \   |             |        /  /
1870   //     \  | . . .       |       /  /
1871   //      \ |             |      /  /
1872   //      Region         Phi[M] /  /
1873   //        |               \  /  /
1874   //         \             MergeMem
1875   //          \            /
1876   //          MemBarVolatile
1877   //
1878   // As with CMS + CondCardMark the first MergeMem merges the
1879   // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
1880   // slice from the Store into the card mark membar. However, in this
1881   // case it may also merge an AliasRawIdx mem slice from the pre
1882   // barrier write.
1883   //
1884   // The trailing MergeMem merges an AliasIdxBot Mem slice from the
1885   // leading membar with an oop slice from the StoreN and an
1886   // AliasRawIdx slice from the post barrier writes. In this case the
1887   // AliasIdxRaw Mem slice is merged through a series of Phi nodes
1888   // which combine feeds from the If regions in the post barrier
1889   // subgraph.
1890   //
1891   // So, for G1 the same characteristic subgraph arises as for CMS +
1892   // CondCardMark. There is a normal subgraph feeding the card mark
1893   // membar and a normal subgraph feeding the trailing membar.
1894   //
1895   // The CAS graph when using G1GC also includes an optional
1896   // post-write subgraph. It is very similar to the above graph except
1897   // for a few details.
1898   // 
  // - The control flow is gated by an additional If which tests the
1900   // result from the CompareAndSwapX node
1901   // 
1902   //  - The MergeMem which feeds the card mark membar only merges the
1903   // AliasIdxBot slice from the leading membar and the AliasIdxRaw
1904   // slice from the pre-barrier. It does not merge the SCMemProj
1905   // AliasIdxBot slice. So, this subgraph does not look like the
1906   // normal CAS subgraph.
1907   //
1908   // - The MergeMem which feeds the trailing membar merges the
1909   // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
1910   // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
1911   // has two AliasIdxBot input slices. However, this subgraph does
1912   // still look like the normal CAS subgraph.
1913   //
1914   // So, the upshot is:
1915   //
1916   // In all cases a volatile put graph will include a *normal*
  // volatile store subgraph between the leading membar and the
  // trailing membar. It may also include a normal volatile store
  // subgraph between the leading membar and the card mark membar.
1920   //
1921   // In all cases a CAS graph will contain a unique normal CAS graph
1922   // feeding the trailing membar.
1923   //
1924   // In all cases where there is a card mark membar (either as part of
1925   // a volatile object put or CAS) it will be fed by a MergeMem whose
1926   // AliasIdxBot slice feed will be a leading membar.
1927   //
1928   // The predicates controlling generation of instructions for store
1929   // and barrier nodes employ a few simple helper functions (described
1930   // below) which identify the presence or absence of all these
1931   // subgraph configurations and provide a means of traversing from
1932   // one node in the subgraph to another.
1933 
1934   // is_CAS(int opcode)
1935   //
1936   // return true if opcode is one of the possible CompareAndSwapX
1937   // values otherwise false.
1938 
1939   bool is_CAS(int opcode)
1940   {
1941     return (opcode == Op_CompareAndSwapI ||
1942             opcode == Op_CompareAndSwapL ||
1943             opcode == Op_CompareAndSwapN ||
1944             opcode == Op_CompareAndSwapP);
1945   }
1946 
1947   // leading_to_trailing
1948   //
  // graph traversal helper which detects the normal case Mem feed from
1950   // a release membar (or, optionally, its cpuorder child) to a
1951   // dependent volatile membar i.e. it ensures that one or other of
1952   // the following Mem flow subgraph is present.
1953   //
1954   //   MemBarRelease {leading}
1955   //   {MemBarCPUOrder} {optional}
1956   //     Bot |  \      . . .
1957   //         |  StoreN/P[mo_release]  . . .
1958   //         |   /
1959   //        MergeMem
1960   //         |
1961   //   MemBarVolatile {not card mark}
1962   //
1963   //   MemBarRelease {leading}
1964   //   {MemBarCPUOrder} {optional}
1965   //      |       \      . . .
1966   //      |     CompareAndSwapX  . . .
1967   //               |
1968   //     . . .    SCMemProj
1969   //           \   |
1970   //      |    MergeMem
1971   //      |       /
1972   //    MemBarCPUOrder
1973   //    MemBarAcquire {trailing}
1974   //
1975   // the predicate needs to be capable of distinguishing the following
  // volatile put graph which may arise when a GC post barrier
1977   // inserts a card mark membar
1978   //
1979   //   MemBarRelease {leading}
1980   //   {MemBarCPUOrder}__
1981   //     Bot |   \       \
1982   //         |   StoreN/P \
1983   //         |    / \     |
1984   //        MergeMem \    |
1985   //         |        \   |
1986   //   MemBarVolatile  \  |
1987   //    {card mark}     \ |
1988   //                  MergeMem
1989   //                      |
1990   // {not card mark} MemBarVolatile
1991   //
1992   // if the correct configuration is present returns the trailing
1993   // membar otherwise NULL.
1994   //
1995   // the input membar is expected to be either a cpuorder membar or a
1996   // release membar. in the latter case it should not have a cpu membar
1997   // child.
1998   //
1999   // the returned value may be a card mark or trailing membar
2000   //
2001 
  // Traverse forwards from a leading (release or cpuorder) membar to
  // the dependent trailing membar, returning it on success or NULL if
  // the expected subgraph shape (documented above) is absent. The
  // returned membar may be a card mark membar or a trailing membar.
  MemBarNode *leading_to_trailing(MemBarNode *leading)
  {
    assert((leading->Opcode() == Op_MemBarRelease ||
            leading->Opcode() == Op_MemBarCPUOrder),
           "expecting a volatile or cpuroder membar!");

    // check the mem flow
    ProjNode *mem = leading->proj_out(TypeFunc::Memory);

    if (!mem) {
      return NULL;
    }

    Node *x = NULL;
    StoreNode * st = NULL;
    LoadStoreNode *cas = NULL;
    MergeMemNode *mm = NULL;
    MergeMemNode *mm2 = NULL;

    // scan the users of the leading membar's memory projection looking
    // for a single releasing store or CAS plus at most two MergeMems
    // (two MergeMems arise in the CMS + CondCardMark and G1 cases
    // where one merge feeds the card mark membar and the other the
    // trailing membar)
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      if (x->is_MergeMem()) {
        if (mm != NULL) {
          if (mm2 != NULL) {
            // should not see more than 2 merge mems
            return NULL;
          } else {
            mm2 = x->as_MergeMem();
          }
        } else {
          mm = x->as_MergeMem();
        }
      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
        // two releasing stores/CAS nodes is one too many
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        st = x->as_Store();
      } else if (is_CAS(x->Opcode())) {
        if (st != NULL || cas != NULL) {
          return NULL;
        }
        cas = x->as_LoadStore();
      }
    }

    // must have a store or a cas
    if (!st && !cas) {
      return NULL;
    }

    // must have at least one merge if we also have st
    if (st && !mm) {
      return NULL;
    }

    if (cas) {
      // CAS variant: expect CAS -> SCMemProj -> MergeMem ->
      // MemBarCPUOrder -> MemBarAcquire (the trailing pair)
      Node *y = NULL;
      // look for an SCMemProj
      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
        x = cas->fast_out(i);
        if (x->is_Proj()) {
          y = x;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      // the proj must feed a MergeMem
      for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
        x = y->fast_out(i);
        if (x->is_MergeMem()) {
          mm = x->as_MergeMem();
          break;
        }
      }
      if (mm == NULL) {
        return NULL;
      }
      MemBarNode *mbar = NULL;
      // ensure the merge feeds a trailing membar cpuorder + acquire pair
      for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
        x = mm->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarCPUOrder) {
            MemBarNode *z =  x->as_MemBar();
            z = child_membar(z);
            if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
              mbar = z;
            }
          }
          break;
        }
      }
      return mbar;
    } else {
      // store variant: the store must feed both merges (when two are
      // present) and each merge must feed a volatile membar
      Node *y = NULL;
      // ensure the store feeds the first mergemem;
      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
        if (st->fast_out(i) == mm) {
          y = st;
          break;
        }
      }
      if (y == NULL) {
        return NULL;
      }
      if (mm2 != NULL) {
        // ensure the store feeds the second mergemem;
        y = NULL;
        for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
          if (st->fast_out(i) == mm2) {
            y = st;
          }
        }
        if (y == NULL) {
          return NULL;
        }
      }

      MemBarNode *mbar = NULL;
      // ensure the first mergemem feeds a volatile membar
      for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
        x = mm->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarVolatile) {
            mbar = x->as_MemBar();
          }
          break;
        }
      }
      if (mm2 == NULL) {
        // this is our only option for a trailing membar
        return mbar;
      }
      // ensure the second mergemem feeds a volatile membar
      MemBarNode *mbar2 = NULL;
      for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
        x = mm2->fast_out(i);
        if (x->is_MemBar()) {
          int opcode = x->Opcode();
          if (opcode == Op_MemBarVolatile) {
            mbar2 = x->as_MemBar();
          }
          break;
        }
      }
      // if we have two merge mems we must have two volatile membars
      if (mbar == NULL || mbar2 == NULL) {
        return NULL;
      }
      // return the trailing membar: whichever of the two volatile
      // membars is not the card mark membar
      if (is_card_mark_membar(mbar2)) {
        return mbar;
      } else {
        if (is_card_mark_membar(mbar)) {
          return mbar2;
        } else {
          return NULL;
        }
      }
    }
  }
2168 
2169   // trailing_to_leading
2170   //
2171   // graph traversal helper which detects the normal case Mem feed
2172   // from a trailing membar to a preceding release membar (optionally
2173   // its cpuorder child) i.e. it ensures that one or other of the
2174   // following Mem flow subgraphs is present.
2175   //
2176   //   MemBarRelease {leading}
2177   //   MemBarCPUOrder {optional}
2178   //    | Bot |  \      . . .
2179   //    |     |  StoreN/P[mo_release]  . . .
2180   //    |     |   /
2181   //    |    MergeMem
2182   //    |     |
2183   //   MemBarVolatile {not card mark}
2184   //
2185   //   MemBarRelease {leading}
2186   //   MemBarCPUOrder {optional}
2187   //      |       \      . . .
2188   //      |     CompareAndSwapX  . . .
2189   //               |
2190   //     . . .    SCMemProj
2191   //           \   |
2192   //      |    MergeMem
2193   //      |       |
2194   //    MemBarCPUOrder
2195   //    MemBarAcquire {trailing}
2196   //
2197   // this predicate checks for the same flow as the previous predicate
2198   // but starting from the bottom rather than the top.
2199   //
  // if the configuration is present returns the cpuorder membar for
2201   // preference or when absent the release membar otherwise NULL.
2202   //
2203   // n.b. the input membar is expected to be a MemBarVolatile or
2204   // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
2205   // mark membar.
2206 
2207   MemBarNode *trailing_to_leading(const MemBarNode *barrier)
2208   {
2209     // input must be a volatile membar
2210     assert((barrier->Opcode() == Op_MemBarVolatile ||
2211             barrier->Opcode() == Op_MemBarAcquire),
2212            "expecting a volatile or an acquire membar");
2213 
2214     assert((barrier->Opcode() != Op_MemBarVolatile) ||
2215            !is_card_mark_membar(barrier),
2216            "not expecting a card mark membar");
2217     Node *x;
2218     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2219 
2220     // if we have an acquire membar then it must be fed via a CPUOrder
2221     // membar
2222 
2223     if (is_cas) {
2224       // skip to parent barrier which must be a cpuorder
2225       x = parent_membar(barrier);
2226       if (x->Opcode() != Op_MemBarCPUOrder)
2227         return NULL;
2228     } else {
2229       // start from the supplied barrier
2230       x = (Node *)barrier;
2231     }
2232 
2233     // the Mem feed to the membar should be a merge
2234     x = x ->in(TypeFunc::Memory);
2235     if (!x->is_MergeMem())
2236       return NULL;
2237 
2238     MergeMemNode *mm = x->as_MergeMem();
2239 
2240     if (is_cas) {
2241       // the merge should be fed from the CAS via an SCMemProj node
2242       x = NULL;
2243       for (uint idx = 1; idx < mm->req(); idx++) {
2244         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2245           x = mm->in(idx);
2246           break;
2247         }
2248       }
2249       if (x == NULL) {
2250         return NULL;
2251       }
2252       // check for a CAS feeding this proj
2253       x = x->in(0);
2254       int opcode = x->Opcode();
2255       if (!is_CAS(opcode)) {
2256         return NULL;
2257       }
2258       // the CAS should get its mem feed from the leading membar
2259       x = x->in(MemNode::Memory);
2260     } else {
2261       // the merge should get its Bottom mem feed from the leading membar
2262       x = mm->in(Compile::AliasIdxBot);
2263     }
2264 
2265     // ensure this is a non control projection
2266     if (!x->is_Proj() || x->is_CFG()) {
2267       return NULL;
2268     }
2269     // if it is fed by a membar that's the one we want
2270     x = x->in(0);
2271 
2272     if (!x->is_MemBar()) {
2273       return NULL;
2274     }
2275 
2276     MemBarNode *leading = x->as_MemBar();
2277     // reject invalid candidates
2278     if (!leading_membar(leading)) {
2279       return NULL;
2280     }
2281 
2282     // ok, we have a leading membar, now for the sanity clauses
2283 
2284     // the leading membar must feed Mem to a releasing store or CAS
2285     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2286     StoreNode *st = NULL;
2287     LoadStoreNode *cas = NULL;
2288     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2289       x = mem->fast_out(i);
2290       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2291         // two stores or CASes is one too many
2292         if (st != NULL || cas != NULL) {
2293           return NULL;
2294         }
2295         st = x->as_Store();
2296       } else if (is_CAS(x->Opcode())) {
2297         if (st != NULL || cas != NULL) {
2298           return NULL;
2299         }
2300         cas = x->as_LoadStore();
2301       }
2302     }
2303 
2304     // we should not have both a store and a cas
2305     if (st == NULL & cas == NULL) {
2306       return NULL;
2307     }
2308 
2309     if (st == NULL) {
2310       // nothing more to check
2311       return leading;
2312     } else {
2313       // we should not have a store if we started from an acquire
2314       if (is_cas) {
2315         return NULL;
2316       }
2317 
2318       // the store should feed the merge we used to get here
2319       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2320         if (st->fast_out(i) == mm) {
2321           return leading;
2322         }
2323       }
2324     }
2325 
2326     return NULL;
2327   }
2328 
2329   // card_mark_to_leading
2330   //
2331   // graph traversal helper which traverses from a card mark volatile
2332   // membar to a leading membar i.e. it ensures that the following Mem
2333   // flow subgraph is present.
2334   //
2335   //    MemBarRelease {leading}
2336   //   {MemBarCPUOrder} {optional}
2337   //         |   . . .
2338   //     Bot |   /
2339   //      MergeMem
2340   //         |
2341   //     MemBarVolatile (card mark)
2342   //        |     \
2343   //      . . .   StoreCM
2344   //
  // if the configuration is present returns the cpuorder membar for
2346   // preference or when absent the release membar otherwise NULL.
2347   //
  // n.b. the input membar is expected to be a MemBarVolatile and must
2349   // be a card mark membar.
2350 
2351   MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
2352   {
2353     // input must be a card mark volatile membar
2354     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2355 
2356     // the Mem feed to the membar should be a merge
2357     Node *x = barrier->in(TypeFunc::Memory);
2358     if (!x->is_MergeMem()) {
2359       return NULL;
2360     }
2361 
2362     MergeMemNode *mm = x->as_MergeMem();
2363 
2364     x = mm->in(Compile::AliasIdxBot);
2365 
2366     if (!x->is_MemBar()) {
2367       return NULL;
2368     }
2369 
2370     MemBarNode *leading = x->as_MemBar();
2371 
2372     if (leading_membar(leading)) {
2373       return leading;
2374     }
2375 
2376     return NULL;
2377   }
2378 
// Predicate controlling translation of MemBarAcquire: returns true
// when the acquire membar is redundant because the preceding volatile
// load will be translated as an acquiring load (ldar), or because it
// is the trailing membar of a CAS.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on it's preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    // the same load must also be fed Mem from the parent membar
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2486 
2487 bool needs_acquiring_load(const Node *n)
2488 {
2489   assert(n->is_Load(), "expecting a load");
2490   if (UseBarriersForVolatile) {
2491     // we use a normal load and a dmb
2492     return false;
2493   }
2494 
2495   LoadNode *ld = n->as_Load();
2496 
2497   if (!ld->is_acquire()) {
2498     return false;
2499   }
2500 
2501   // check if this load is feeding an acquire membar
2502   //
2503   //   LoadX[mo_acquire]
2504   //   {  |1   }
2505   //   {DecodeN}
2506   //      |Parms
2507   //   MemBarAcquire*
2508   //
2509   // where * tags node we were passed
2510   // and |k means input k
2511 
2512   Node *start = ld;
2513   Node *mbacq = NULL;
2514 
2515   // if we hit a DecodeNarrowPtr we reset the start node and restart
2516   // the search through the outputs
2517  restart:
2518 
2519   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2520     Node *x = start->fast_out(i);
2521     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2522       mbacq = x;
2523     } else if (!mbacq &&
2524                (x->is_DecodeNarrowPtr() ||
2525                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2526       start = x;
2527       goto restart;
2528     }
2529   }
2530 
2531   if (mbacq) {
2532     return true;
2533   }
2534 
2535   // now check for an unsafe volatile get
2536 
2537   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2538   //
2539   //     MemBarCPUOrder
2540   //        ||       \\
2541   //   MemBarAcquire* LoadX[mo_acquire]
2542   //        ||
2543   //   MemBarCPUOrder
2544 
2545   MemBarNode *membar;
2546 
2547   membar = parent_membar(ld);
2548 
2549   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2550     return false;
2551   }
2552 
2553   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2554 
2555   membar = child_membar(membar);
2556 
2557   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2558     return false;
2559   }
2560 
2561   membar = child_membar(membar);
2562 
2563   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2564     return false;
2565   }
2566 
2567   return true;
2568 }
2569 
2570 bool unnecessary_release(const Node *n)
2571 {
2572   assert((n->is_MemBar() &&
2573           n->Opcode() == Op_MemBarRelease),
2574          "expecting a release membar");
2575 
2576   if (UseBarriersForVolatile) {
2577     // we need to plant a dmb
2578     return false;
2579   }
2580 
2581   // if there is a dependent CPUOrder barrier then use that as the
2582   // leading
2583 
2584   MemBarNode *barrier = n->as_MemBar();
2585   // check for an intervening cpuorder membar
2586   MemBarNode *b = child_membar(barrier);
2587   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2588     // ok, so start the check from the dependent cpuorder barrier
2589     barrier = b;
2590   }
2591 
2592   // must start with a normal feed
2593   MemBarNode *trailing = leading_to_trailing(barrier);
2594 
2595   return (trailing != NULL);
2596 }
2597 
2598 bool unnecessary_volatile(const Node *n)
2599 {
2600   // assert n->is_MemBar();
2601   if (UseBarriersForVolatile) {
2602     // we need to plant a dmb
2603     return false;
2604   }
2605 
2606   MemBarNode *mbvol = n->as_MemBar();
2607 
2608   // first we check if this is part of a card mark. if so then we have
2609   // to generate a StoreLoad barrier
2610 
2611   if (is_card_mark_membar(mbvol)) {
2612       return false;
2613   }
2614 
2615   // ok, if it's not a card mark then we still need to check if it is
2616   // a trailing membar of a volatile put graph.
2617 
2618   return (trailing_to_leading(mbvol) != NULL);
2619 }
2620 
2621 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2622 
2623 bool needs_releasing_store(const Node *n)
2624 {
2625   // assert n->is_Store();
2626   if (UseBarriersForVolatile) {
2627     // we use a normal store and dmb combination
2628     return false;
2629   }
2630 
2631   StoreNode *st = n->as_Store();
2632 
2633   // the store must be marked as releasing
2634   if (!st->is_release()) {
2635     return false;
2636   }
2637 
2638   // the store must be fed by a membar
2639 
2640   Node *x = st->lookup(StoreNode::Memory);
2641 
2642   if (! x || !x->is_Proj()) {
2643     return false;
2644   }
2645 
2646   ProjNode *proj = x->as_Proj();
2647 
2648   x = proj->lookup(0);
2649 
2650   if (!x || !x->is_MemBar()) {
2651     return false;
2652   }
2653 
2654   MemBarNode *barrier = x->as_MemBar();
2655 
2656   // if the barrier is a release membar or a cpuorder mmebar fed by a
2657   // release membar then we need to check whether that forms part of a
2658   // volatile put graph.
2659 
2660   // reject invalid candidates
2661   if (!leading_membar(barrier)) {
2662     return false;
2663   }
2664 
2665   // does this lead a normal subgraph?
2666   MemBarNode *trailing = leading_to_trailing(barrier);
2667 
2668   return (trailing != NULL);
2669 }
2670 
2671 // predicate controlling translation of CAS
2672 //
2673 // returns true if CAS needs to use an acquiring load otherwise false
2674 
2675 bool needs_acquiring_load_exclusive(const Node *n)
2676 {
2677   assert(is_CAS(n->Opcode()), "expecting a compare and swap");
2678   if (UseBarriersForVolatile) {
2679     return false;
2680   }
2681 
2682   // CAS nodes only ought to turn up in inlined unsafe CAS operations
2683 #ifdef ASSERT
2684   LoadStoreNode *st = n->as_LoadStore();
2685 
2686   // the store must be fed by a membar
2687 
2688   Node *x = st->lookup(StoreNode::Memory);
2689 
2690   assert (x && x->is_Proj(), "CAS not fed by memory proj!");
2691 
2692   ProjNode *proj = x->as_Proj();
2693 
2694   x = proj->lookup(0);
2695 
2696   assert (x && x->is_MemBar(), "CAS not fed by membar!");
2697 
2698   MemBarNode *barrier = x->as_MemBar();
2699 
2700   // the barrier must be a cpuorder mmebar fed by a release membar
2701 
2702   assert(barrier->Opcode() == Op_MemBarCPUOrder,
2703          "CAS not fed by cpuorder membar!");
2704 
2705   MemBarNode *b = parent_membar(barrier);
2706   assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
2707           "CAS not fed by cpuorder+release membar pair!");
2708 
2709   // does this lead a normal subgraph?
2710   MemBarNode *mbar = leading_to_trailing(barrier);
2711 
2712   assert(mbar != NULL, "CAS not embedded in normal graph!");
2713 
2714   assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
2715 #endif // ASSERT
2716   // so we can just return true here
2717   return true;
2718 }
2719 
2720 // predicate controlling translation of StoreCM
2721 //
2722 // returns true if a StoreStore must precede the card write otherwise
2723 // false
2724 
2725 bool unnecessary_storestore(const Node *storecm)
2726 {
2727   assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
2728 
2729   // we only ever need to generate a dmb ishst between an object put
2730   // and the associated card mark when we are using CMS without
2731   // conditional card marking. Any other occurence will happen when
2732   // performing a card mark using CMS with conditional card marking or
2733   // G1. In those cases the preceding MamBarVolatile will be
2734   // translated to a dmb ish which guarantes visibility of the
2735   // preceding StoreN/P before this StoreCM
2736 
2737   if (!UseConcMarkSweepGC || UseCondCardMark) {
2738     return true;
2739   }
2740 
2741   // if we are implementing volatile puts using barriers then we must
2742   // insert the dmb ishst
2743 
2744   if (UseBarriersForVolatile) {
2745     return false;
2746   }
2747 
2748   // we must be using CMS with conditional card marking so we ahve to
2749   // generate the StoreStore
2750 
2751   return false;
2752 }
2753 
2754 
2755 #define __ _masm.
2756 
// forward declarations for helper functions to convert register
// indices to register objects
2759 
2760 // the ad file has to provide implementations of certain methods
2761 // expected by the generic code
2762 //
2763 // REQUIRED FUNCTIONALITY
2764 
2765 //=============================================================================
2766 
2767 // !!!!! Special hack to get all types of calls to specify the byte offset
2768 //       from the start of the call to the point where the return address
2769 //       will point.
2770 
2771 int MachCallStaticJavaNode::ret_addr_offset()
2772 {
2773   // call should be a simple bl
2774   int off = 4;
2775   return off;
2776 }
2777 
2778 int MachCallDynamicJavaNode::ret_addr_offset()
2779 {
2780   return 16; // movz, movk, movk, bl
2781 }
2782 
2783 int MachCallRuntimeNode::ret_addr_offset() {
2784   // for generated stubs the call will be
2785   //   far_call(addr)
2786   // for real runtime callouts it will be six instructions
2787   // see aarch64_enc_java_to_runtime
2788   //   adr(rscratch2, retaddr)
2789   //   lea(rscratch1, RuntimeAddress(addr)
2790   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2791   //   blrt rscratch1
2792   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2793   if (cb) {
2794     return MacroAssembler::far_branch_size();
2795   } else {
2796     return 6 * NativeInstruction::instruction_size;
2797   }
2798 }
2799 
2800 // Indicate if the safepoint node needs the polling page as an input
2801 
2802 // the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
2804 // instruction itself. so we cannot plant a mov of the safepoint poll
2805 // address followed by a load. setting this to true means the mov is
2806 // scheduled as a prior instruction. that's better for scheduling
2807 // anyway.
2808 
2809 bool SafePointNode::needs_polling_address_input()
2810 {
2811   return true;
2812 }
2813 
2814 //=============================================================================
2815 
2816 #ifndef PRODUCT
2817 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
2818   st->print("BREAKPOINT");
2819 }
2820 #endif
2821 
2822 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2823   MacroAssembler _masm(&cbuf);
2824   __ brk(0);
2825 }
2826 
2827 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
2828   return MachNode::size(ra_);
2829 }
2830 
2831 //=============================================================================
2832 
2833 #ifndef PRODUCT
2834   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2835     st->print("nop \t# %d bytes pad for loops and calls", _count);
2836   }
2837 #endif
2838 
2839   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2840     MacroAssembler _masm(&cbuf);
2841     for (int i = 0; i < _count; i++) {
2842       __ nop();
2843     }
2844   }
2845 
2846   uint MachNopNode::size(PhaseRegAlloc*) const {
2847     return _count * NativeInstruction::instruction_size;
2848   }
2849 
#ifndef PRODUCT
  // debug listing for a MachMskNode; not yet implemented (prints nothing)
  void MachMskNode::format(PhaseRegAlloc*, outputStream* st) const {
    // TBD
  }
#endif
2855 
  // code emission for a MachMskNode; not yet implemented (emits nothing)
  void MachMskNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    // TBD
  }
2859 
  uint MachMskNode::size(PhaseRegAlloc* ra_) const {
    return 0; // TBD -- consistent with emit(), which produces no code yet
  }
2863 
2864 //=============================================================================
2865 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
2866 
2867 int Compile::ConstantTable::calculate_table_base_offset() const {
2868   return 0;  // absolute addressing, no offset
2869 }
2870 
2871 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
2872 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
2873   ShouldNotReachHere();
2874 }
2875 
2876 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
2877   // Empty encoding
2878 }
2879 
2880 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
2881   return 0;
2882 }
2883 
2884 #ifndef PRODUCT
2885 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
2886   st->print("-- \t// MachConstantBaseNode (empty encoding)");
2887 }
2888 #endif
2889 
#ifndef PRODUCT
// debug listing for the method prolog; mirrors the instruction
// sequence produced by MachPrologNode::emit / build_frame
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  // frame size in bytes
  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames are allocated with an immediate sub; larger frames
  // materialize the adjustment in rscratch1 first
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
2911 
// Emit the method prolog: a patchable invalidation nop, an optional
// stack bang, frame construction, and bookkeeping for the constant
// table. Ordering of these steps is significant.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  // bang the stack before building the frame if the frame is large
  // enough to need it
  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
2947 
2948 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
2949 {
2950   return MachNode::size(ra_); // too many variables; just compute it
2951                               // the hard way
2952 }
2953 
2954 int MachPrologNode::reloc() const
2955 {
2956   return 0;
2957 }
2958 
2959 //=============================================================================
2960 
#ifndef PRODUCT
// debug listing for the method epilog; mirrors the frame teardown
// and return-poll sequence produced by MachEpilogNode::emit
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  // frame size in bytes
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // small frames pop with an immediate add; larger frames need the
  // adjustment materialized in rscratch1 first
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  // return-type safepoint poll
  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
2986 
2987 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
2988   Compile* C = ra_->C;
2989   MacroAssembler _masm(&cbuf);
2990   int framesize = C->frame_slots() << LogBytesPerInt;
2991 
2992   __ remove_frame(framesize);
2993 
2994   if (NotifySimulator) {
2995     __ notify(Assembler::method_reentry);
2996   }
2997 
2998   if (do_polling() && C->is_method_compilation()) {
2999     __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
3000   }
3001 }
3002 
3003 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
3004   // Variable size. Determine dynamically.
3005   return MachNode::size(ra_);
3006 }
3007 
3008 int MachEpilogNode::reloc() const {
3009   // Return number of relocatable values contained in this instruction.
3010   return 1; // 1 for polling page.
3011 }
3012 
3013 const Pipeline * MachEpilogNode::pipeline() const {
3014   return MachNode::pipeline_class();
3015 }
3016 
3017 // This method seems to be obsolete. It is declared in machnode.hpp
3018 // and defined in all *.ad files, but it is never called. Should we
3019 // get rid of it?
3020 int MachEpilogNode::safepoint_offset() const {
3021   assert(do_polling(), "no return for this epilog node");
3022   return 4;
3023 }
3024 
3025 //=============================================================================
3026 
3027 // Figure out which register class each belongs in: rc_int, rc_float or
3028 // rc_stack.
3029 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3030 
3031 static enum RC rc_class(OptoReg::Name reg) {
3032 
3033   if (reg == OptoReg::Bad) {
3034     return rc_bad;
3035   }
3036 
3037   // we have 30 int registers * 2 halves
3038   // (rscratch1 and rscratch2 are omitted)
3039 
3040   if (reg < 60) {
3041     return rc_int;
3042   }
3043 
3044   // we have 32 float register * 2 halves
3045   if (reg < 60 + 128) {
3046     return rc_float;
3047   }
3048 
3049   // Between float regs & stack is the flags regs.
3050   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3051 
3052   return rc_stack;
3053 }
3054 
3055 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3056   Compile* C = ra_->C;
3057 
3058   // Get registers to move.
3059   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3060   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3061   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3062   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3063 
3064   enum RC src_hi_rc = rc_class(src_hi);
3065   enum RC src_lo_rc = rc_class(src_lo);
3066   enum RC dst_hi_rc = rc_class(dst_hi);
3067   enum RC dst_lo_rc = rc_class(dst_lo);
3068 
3069   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3070 
3071   if (src_hi != OptoReg::Bad) {
3072     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3073            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3074            "expected aligned-adjacent pairs");
3075   }
3076 
3077   if (src_lo == dst_lo && src_hi == dst_hi) {
3078     return 0;            // Self copy, no move.
3079   }
3080 
3081   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3082               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3083   int src_offset = ra_->reg2offset(src_lo);
3084   int dst_offset = ra_->reg2offset(dst_lo);
3085 
3086   if (bottom_type()->isa_vect() != NULL) {
3087     uint ireg = ideal_reg();
3088     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3089     if (cbuf) {
3090       MacroAssembler _masm(cbuf);
3091       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3092       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3093         // stack->stack
3094         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3095         if (ireg == Op_VecD) {
3096           __ unspill(rscratch1, true, src_offset);
3097           __ spill(rscratch1, true, dst_offset);
3098         } else {
3099           __ spill_copy128(src_offset, dst_offset);
3100         }
3101       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3102         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3103                ireg == Op_VecD ? __ T8B : __ T16B,
3104                as_FloatRegister(Matcher::_regEncode[src_lo]));
3105       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3106         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3107                        ireg == Op_VecD ? __ D : __ Q,
3108                        ra_->reg2offset(dst_lo));
3109       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3110         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3111                        ireg == Op_VecD ? __ D : __ Q,
3112                        ra_->reg2offset(src_lo));
3113       } else {
3114         ShouldNotReachHere();
3115       }
3116     }
3117   } else if (cbuf) {
3118     MacroAssembler _masm(cbuf);
3119     switch (src_lo_rc) {
3120     case rc_int:
3121       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3122         if (is64) {
3123             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3124                    as_Register(Matcher::_regEncode[src_lo]));
3125         } else {
3126             MacroAssembler _masm(cbuf);
3127             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3128                     as_Register(Matcher::_regEncode[src_lo]));
3129         }
3130       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3131         if (is64) {
3132             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3133                      as_Register(Matcher::_regEncode[src_lo]));
3134         } else {
3135             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3136                      as_Register(Matcher::_regEncode[src_lo]));
3137         }
3138       } else {                    // gpr --> stack spill
3139         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3140         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3141       }
3142       break;
3143     case rc_float:
3144       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3145         if (is64) {
3146             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3147                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3148         } else {
3149             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3150                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3151         }
3152       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3153           if (cbuf) {
3154             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3155                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3156         } else {
3157             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3158                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3159         }
3160       } else {                    // fpr --> stack spill
3161         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3162         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3163                  is64 ? __ D : __ S, dst_offset);
3164       }
3165       break;
3166     case rc_stack:
3167       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3168         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3169       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3170         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3171                    is64 ? __ D : __ S, src_offset);
3172       } else {                    // stack --> stack copy
3173         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3174         __ unspill(rscratch1, is64, src_offset);
3175         __ spill(rscratch1, is64, dst_offset);
3176       }
3177       break;
3178     default:
3179       assert(false, "bad rc_class for spill");
3180       ShouldNotReachHere();
3181     }
3182   }
3183 
3184   if (st) {
3185     st->print("spill ");
3186     if (src_lo_rc == rc_stack) {
3187       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3188     } else {
3189       st->print("%s -> ", Matcher::regName[src_lo]);
3190     }
3191     if (dst_lo_rc == rc_stack) {
3192       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3193     } else {
3194       st->print("%s", Matcher::regName[dst_lo]);
3195     }
3196     if (bottom_type()->isa_vect() != NULL) {
3197       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3198     } else {
3199       st->print("\t# spill size = %d", is64 ? 64:32);
3200     }
3201   }
3202 
3203   return 0;
3204 
3205 }
3206 
3207 #ifndef PRODUCT
3208 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3209   if (!ra_)
3210     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
3211   else
3212     implementation(NULL, ra_, false, st);
3213 }
3214 #endif
3215 
3216 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3217   implementation(&cbuf, ra_, false, NULL);
3218 }
3219 
3220 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
3221   return MachNode::size(ra_);
3222 }
3223 
3224 //=============================================================================
3225 
3226 #ifndef PRODUCT
3227 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3228   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3229   int reg = ra_->get_reg_first(this);
3230   st->print("add %s, rsp, #%d]\t# box lock",
3231             Matcher::regName[reg], offset);
3232 }
3233 #endif
3234 
3235 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
3236   MacroAssembler _masm(&cbuf);
3237 
3238   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3239   int reg    = ra_->get_encode(this);
3240 
3241   if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
3242     __ add(as_Register(reg), sp, offset);
3243   } else {
3244     ShouldNotReachHere();
3245   }
3246 }
3247 
3248 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
3249   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
3250   return 4;
3251 }
3252 
3253 //=============================================================================
3254 
3255 #ifndef PRODUCT
3256 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3257 {
3258   st->print_cr("# MachUEPNode");
3259   if (UseCompressedClassPointers) {
3260     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3261     if (Universe::narrow_klass_shift() != 0) {
3262       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3263     }
3264   } else {
3265    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3266   }
3267   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3268   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3269 }
3270 #endif
3271 
3272 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
3273 {
3274   // This is the unverified entry point.
3275   MacroAssembler _masm(&cbuf);
3276 
3277   __ cmp_klass(j_rarg0, rscratch2, rscratch1);
3278   Label skip;
3279   // TODO
3280   // can we avoid this skip and still use a reloc?
3281   __ br(Assembler::EQ, skip);
3282   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
3283   __ bind(skip);
3284 }
3285 
3286 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
3287 {
3288   return MachNode::size(ra_);
3289 }
3290 
3291 // REQUIRED EMIT CODE
3292 
3293 //=============================================================================
3294 
3295 // Emit exception handler code.
3296 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
3297 {
3298   // mov rscratch1 #exception_blob_entry_point
3299   // br rscratch1
3300   // Note that the code buffer's insts_mark is always relative to insts.
3301   // That's why we must use the macroassembler to generate a handler.
3302   MacroAssembler _masm(&cbuf);
3303   address base = __ start_a_stub(size_exception_handler());
3304   if (base == NULL) {
3305     ciEnv::current()->record_failure("CodeCache is full");
3306     return 0;  // CodeBuffer::expand failed
3307   }
3308   int offset = __ offset();
3309   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
3310   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
3311   __ end_a_stub();
3312   return offset;
3313 }
3314 
3315 // Emit deopt handler code.
3316 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
3317 {
3318   // Note that the code buffer's insts_mark is always relative to insts.
3319   // That's why we must use the macroassembler to generate a handler.
3320   MacroAssembler _masm(&cbuf);
3321   address base = __ start_a_stub(size_deopt_handler());
3322   if (base == NULL) {
3323     ciEnv::current()->record_failure("CodeCache is full");
3324     return 0;  // CodeBuffer::expand failed
3325   }
3326   int offset = __ offset();
3327 
3328   __ adr(lr, __ pc());
3329   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
3330 
3331   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
3332   __ end_a_stub();
3333   return offset;
3334 }
3335 
3336 // REQUIRED MATCHER CODE
3337 
3338 //=============================================================================
3339 
3340 const bool Matcher::match_rule_supported(int opcode) {
3341 
3342   // TODO
3343   // identify extra cases that we might want to provide match rules for
3344   // e.g. Op_StrEquals and other intrinsics
3345   if (!has_match_rule(opcode)) {
3346     return false;
3347   }
3348 
3349   return true;  // Per default match rules are supported.
3350 }
3351 
3352 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3353 
3354   // TODO
3355   // identify extra cases that we might want to provide match rules for
3356   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3357   bool ret_value = match_rule_supported(opcode);
3358   // Add rules here.
3359 
3360   return ret_value;  // Per default match rules are supported.
3361 }
3362 
3363 const bool Matcher::has_predicated_vectors(void) {
3364   return false;
3365 }
3366 
3367 const int Matcher::float_pressure(int default_pressure_threshold) {
3368   return default_pressure_threshold;
3369 }
3370 
3371 int Matcher::regnum_to_fpu_offset(int regnum)
3372 {
3373   Unimplemented();
3374   return 0;
3375 }
3376 
3377 // Is this branch offset short enough that a short branch can be used?
3378 //
3379 // NOTE: If the platform does not provide any short branch variants, then
3380 //       this method should return false for offset 0.
3381 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3382   // The passed offset is relative to address of the branch.
3383 
3384   return (-32768 <= offset && offset < 32768);
3385 }
3386 
3387 const bool Matcher::isSimpleConstant64(jlong value) {
3388   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
3389   // Probably always true, even if a temp register is required.
3390   return true;
3391 }
3392 
3393 // true just means we have fast l2f conversion
3394 const bool Matcher::convL2FSupported(void) {
3395   return true;
3396 }
3397 
3398 // Vector width in bytes.
3399 const int Matcher::vector_width_in_bytes(BasicType bt) {
3400   int size = MIN2(16,(int)MaxVectorSize);
3401   // Minimum 2 values in vector
3402   if (size < 2*type2aelembytes(bt)) size = 0;
3403   // But never < 4
3404   if (size < 4) size = 0;
3405   return size;
3406 }
3407 
3408 // Limits on vector size (number of elements) loaded into vector.
3409 const int Matcher::max_vector_size(const BasicType bt) {
3410   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3411 }
3412 const int Matcher::min_vector_size(const BasicType bt) {
3413 //  For the moment limit the vector size to 8 bytes
3414     int size = 8 / type2aelembytes(bt);
3415     if (size < 2) size = 2;
3416     return size;
3417 }
3418 
3419 // Vector ideal reg.
3420 const int Matcher::vector_ideal_reg(int len) {
3421   switch(len) {
3422     case  8: return Op_VecD;
3423     case 16: return Op_VecX;
3424   }
3425   ShouldNotReachHere();
3426   return 0;
3427 }
3428 
3429 const int Matcher::vector_shift_count_ideal_reg(int size) {
3430   return Op_VecX;
3431 }
3432 
3433 // AES support not yet implemented
3434 const bool Matcher::pass_original_key_for_aes() {
3435   return false;
3436 }
3437 
3438 // x86 supports misaligned vectors store/load.
3439 const bool Matcher::misaligned_vectors_ok() {
3440   return !AlignVector; // can be changed by flag
3441 }
3442 
// ClearArray/initialization counts are in long words, not bytes:
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;
3445 
3446 // Use conditional move (CMOVL)
3447 const int Matcher::long_cmove_cost() {
3448   // long cmoves are no more expensive than int cmoves
3449   return 0;
3450 }
3451 
3452 const int Matcher::float_cmove_cost() {
3453   // float cmoves are no more expensive than int cmoves
3454   return 0;
3455 }
3456 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 shift instructions use only the low bits of the count.
const bool Matcher::need_masked_shift_count = false;
3468 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Only profitable when no shift is needed to decode the narrow oop.
  return Universe::narrow_oop_shift() == 0;
}
3482 
3483 bool Matcher::narrow_klass_use_complex_address() {
3484 // TODO
3485 // decide whether we need to set this to true
3486   return false;
3487 }
3488 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
3501 
// Platform hook for fixing up a node that becomes an implicit null
// check.  ("No-op on amd64"; here it aborts via Unimplemented() and so
// must never be reached on AArch64.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3506 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.  AArch64 FP arithmetic is already
// strict, so no extra rounding is needed.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
3510 
3511 // Are floats converted to double when stored to stack during
3512 // deoptimization?
3513 bool Matcher::float_in_double() { return true; }
3514 
// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3520 
3521 // Return whether or not this register is ever used as an argument.
3522 // This function is used on startup to build the trampoline stubs in
3523 // generateOptoStub.  Registers not mentioned will be killed by the VM
3524 // call in the trampoline, and arguments in those registers not be
3525 // available to the callee.
3526 bool Matcher::can_be_java_arg(int reg)
3527 {
3528   return
3529     reg ==  R0_num || reg == R0_H_num ||
3530     reg ==  R1_num || reg == R1_H_num ||
3531     reg ==  R2_num || reg == R2_H_num ||
3532     reg ==  R3_num || reg == R3_H_num ||
3533     reg ==  R4_num || reg == R4_H_num ||
3534     reg ==  R5_num || reg == R5_H_num ||
3535     reg ==  R6_num || reg == R6_H_num ||
3536     reg ==  R7_num || reg == R7_H_num ||
3537     reg ==  V0_num || reg == V0_H_num ||
3538     reg ==  V1_num || reg == V1_H_num ||
3539     reg ==  V2_num || reg == V2_H_num ||
3540     reg ==  V3_num || reg == V3_H_num ||
3541     reg ==  V4_num || reg == V4_H_num ||
3542     reg ==  V5_num || reg == V5_H_num ||
3543     reg ==  V6_num || reg == V6_H_num ||
3544     reg ==  V7_num || reg == V7_H_num;
3545 }
3546 
3547 bool Matcher::is_spillable_arg(int reg)
3548 {
3549   return can_be_java_arg(reg);
3550 }
3551 
3552 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
3553   return false;
3554 }
3555 
// Register for DIVI projection of divmodI.
// AArch64 has no combined div/mod instruction, so the divmod
// projections are never matched and these masks must not be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP-save location for method handle invokes: the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3582 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Count the call's parameters (fields from TypeFunc::Parms on).
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): there is no break here, so float/double params
      // fall through and also increment gps below.  This looks like a
      // missing break -- confirm against the simulator's blrt
      // argument-count convention before changing.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Map the Java return type onto the simulator's return-type codes.
  // (The default case deliberately sits mid-switch; it covers all
  // integral and pointer returns.)
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3624 
// Emit a volatile load/store (INSN is one of the ldar*/stlr* forms,
// which accept only a bare base register).  The guarantees reject any
// memory operand with an index, displacement or scale.  The macro
// declares a local `_masm` that subsequent `__` statements in the
// enclosing enc_class expand against.  Note: the SCRATCH parameter is
// accepted but unused by the macro body.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3633 
// Pointer-to-member types for the MacroAssembler load/store emitters,
// parameterized over integer, float and SIMD destination registers.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3638 
3639   // Used for all non-volatile memory accesses.  The use of
3640   // $mem->opcode() to discover whether this pattern uses sign-extended
3641   // offsets is something of a kludge.
3642   static void loadStore(MacroAssembler masm, mem_insn insn,
3643                          Register reg, int opcode,
3644                          Register base, int index, int size, int disp)
3645   {
3646     Address::extend scale;
3647 
3648     // Hooboy, this is fugly.  We need a way to communicate to the
3649     // encoder that the index needs to be sign extended, so we have to
3650     // enumerate all the cases.
3651     switch (opcode) {
3652     case INDINDEXSCALEDOFFSETI2L:
3653     case INDINDEXSCALEDI2L:
3654     case INDINDEXSCALEDOFFSETI2LN:
3655     case INDINDEXSCALEDI2LN:
3656     case INDINDEXOFFSETI2L:
3657     case INDINDEXOFFSETI2LN:
3658       scale = Address::sxtw(size);
3659       break;
3660     default:
3661       scale = Address::lsl(size);
3662     }
3663 
3664     if (index == -1) {
3665       (masm.*insn)(reg, Address(base, disp));
3666     } else {
3667       if (disp == 0) {
3668         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3669       } else {
3670         masm.lea(rscratch1, Address(base, disp));
3671         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3672       }
3673     }
3674   }
3675 
3676   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3677                          FloatRegister reg, int opcode,
3678                          Register base, int index, int size, int disp)
3679   {
3680     Address::extend scale;
3681 
3682     switch (opcode) {
3683     case INDINDEXSCALEDOFFSETI2L:
3684     case INDINDEXSCALEDI2L:
3685     case INDINDEXSCALEDOFFSETI2LN:
3686     case INDINDEXSCALEDI2LN:
3687       scale = Address::sxtw(size);
3688       break;
3689     default:
3690       scale = Address::lsl(size);
3691     }
3692 
3693      if (index == -1) {
3694       (masm.*insn)(reg, Address(base, disp));
3695     } else {
3696       if (disp == 0) {
3697         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3698       } else {
3699         masm.lea(rscratch1, Address(base, disp));
3700         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3701       }
3702     }
3703   }
3704 
3705   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3706                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3707                          int opcode, Register base, int index, int size, int disp)
3708   {
3709     if (index == -1) {
3710       (masm.*insn)(reg, T, Address(base, disp));
3711     } else {
3712       assert(disp == 0, "unsupported address mode");
3713       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3714     }
3715   }
3716 
3717 %}
3718 
3719 
3720 
3721 //----------ENCODING BLOCK-----------------------------------------------------
3722 // This block specifies the encoding classes used by the compiler to
3723 // output byte streams.  Encoding classes are parameterized macros
3724 // used by Machine Instruction Nodes in order to generate the bit
3725 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
3729 // which returns its register number when queried.  CONST_INTER causes
3730 // an operand to generate a function which returns the value of the
3731 // constant when queried.  MEMORY_INTER causes an operand to generate
3732 // four functions which return the Base Register, the Index Register,
3733 // the Scale Value, and the Offset Value of the operand when queried.
3734 // COND_INTER causes an operand to generate six functions which return
3735 // the encoding code (ie - encoding bits for the instruction)
3736 // associated with each basic boolean condition for a conditional
3737 // instruction.
3738 //
3739 // Instructions specify two basic values for encoding.  Again, a
3740 // function is available to check if the constant displacement is an
3741 // oop. They use the ins_encode keyword to specify their encoding
3742 // classes (which must be a sequence of enc_class names, and their
3743 // parameters, specified in the encoding block), and they use the
3744 // opcode keyword to specify, in order, their primary, secondary, and
3745 // tertiary opcode.  Only the opcode sections which a particular
3746 // instruction needs for encoding need to be specified.
3747 encode %{
3748   // Build emit functions for each basic byte or larger field in the
3749   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3750   // from C++ code in the enc_class source block.  Emit functions will
3751   // live in the main source block for now.  In future, we can
3752   // generalize this by adding a syntax that specifies the sizes of
3753   // fields in an order, so that the adlc can build the emit functions
3754   // automagically
3755 
  // catch all for unimplemented encodings -- stops the VM with a
  // "C2 catch all" message if an instruction using it is ever emitted
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3761 
  // BEGIN Non-volatile memory access
  //
  // All of these encodings delegate to loadStore(), which derives the
  // addressing mode from the memory operand's opcode/base/index/scale/disp.
  // Where the same enc_class name appears twice, the variants differ only
  // in the destination operand class (iRegI vs iRegL, etc.).

  // load sign-extended byte into an int register
  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load sign-extended byte into a long register
  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load zero-extended byte into an int register
  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load zero-extended byte into a long register
  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load sign-extended halfword into an int register
  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load sign-extended halfword into a long register
  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load zero-extended halfword into an int register
  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load zero-extended halfword into a long register
  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word into an int register
  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word (zero-extended) into a long register
  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit word sign-extended into a long register
  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit doubleword into a long register
  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit float
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 64-bit double
  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // load 32-bit (S), 64-bit (D) and 128-bit (Q) vector values

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3865 
  // store byte
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte (uses the zero register, no source operand needed)
  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero byte preceded by a StoreStore barrier
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store halfword
  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero halfword
  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit word
  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero 32-bit word
  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit doubleword
  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store zero 64-bit doubleword
  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit float
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 64-bit double
  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // store 32-bit (S), 64-bit (D) and 128-bit (Q) vector values

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3958 
  // END Non-volatile memory access

  // volatile loads and stores
  //
  // These use the acquiring (ldar*) and releasing (stlr*) instruction
  // forms via MOV_VOLATILE, which requires a bare-base memory operand.

  // store-release byte
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // store-release halfword
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // store-release 32-bit word
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // load-acquire byte, then sign extend to int in a separate step
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // load-acquire byte, then sign extend to long
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // load-acquire byte (zero-extended)
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // load-acquire halfword, then sign extend to int
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // load-acquire halfword, then sign extend to long
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // load-acquire halfword (zero-extended)
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // load-acquire 32-bit word
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // load-acquire 64-bit doubleword
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // load-acquire float: acquire the bits into a scratch GP register,
  // then move them into the FP register
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // load-acquire double, via the same GP-register detour
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // store-release 64-bit doubleword
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // store-release float: move the bits to a scratch GP register first
  // (the inner scope keeps this _masm from clashing with the one
  // declared by MOV_VOLATILE)
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // store-release double, via the same GP-register detour
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4087 
  // synchronized read/update encodings

  // load-acquire exclusive.  ldaxr only takes a bare base register, so
  // any displacement and/or index is first folded into rscratch1.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // two-step address formation: base + disp, then + scaled index
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}

  // store-release exclusive.  The success/failure status lands in
  // rscratch1 (0 = success); the final cmpw materializes it in the
  // flags for a following branch.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // two-step address formation: base + disp, then + scaled index
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4148 
  // 64-bit compare-and-exchange; release semantics only
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true);
  %}

  // 32-bit compare-and-exchange; release semantics only
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true);
  %}


  // auxiliary used for CompareAndSwapX to set result register:
  // res = 1 if the preceding compare left EQ set, else 0
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    MacroAssembler _masm(&cbuf);
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
4189 
4190   // prefetch encodings
4191 
4192   enc_class aarch64_enc_prefetchw(memory mem) %{
4193     MacroAssembler _masm(&cbuf);
4194     Register base = as_Register($mem$$base);
4195     int index = $mem$$index;
4196     int scale = $mem$$scale;
4197     int disp = $mem$$disp;
4198     if (index == -1) {
4199       __ prfm(Address(base, disp), PSTL1KEEP);
4200     } else {
4201       Register index_reg = as_Register(index);
4202       if (disp == 0) {
4203         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
4204       } else {
4205         __ lea(rscratch1, Address(base, disp));
4206         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
4207       }
4208     }
4209   %}
4210 
  // Zero a word-aligned region of cnt words starting at base, using a
  // Duff's-device style computed branch into an unrolled sequence of
  // str(zr) instructions.  Both cnt and base are clobbered.
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // Branch (cnt % unroll) instructions backwards from 'entry' so that
    // exactly that many of the unrolled stores execute on the first pass.
    // This relies on each str(zr) below encoding as a single 4-byte
    // instruction, hence the LSL #2 when scaling the instruction count.
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    // Unrolled stores at negative offsets from the (pre-advanced) base.
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
4259 
  // mov encodings
4261 
4262   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4263     MacroAssembler _masm(&cbuf);
4264     u_int32_t con = (u_int32_t)$src$$constant;
4265     Register dst_reg = as_Register($dst$$reg);
4266     if (con == 0) {
4267       __ movw(dst_reg, zr);
4268     } else {
4269       __ movw(dst_reg, con);
4270     }
4271   %}
4272 
4273   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4274     MacroAssembler _masm(&cbuf);
4275     Register dst_reg = as_Register($dst$$reg);
4276     u_int64_t con = (u_int64_t)$src$$constant;
4277     if (con == 0) {
4278       __ mov(dst_reg, zr);
4279     } else {
4280       __ mov(dst_reg, con);
4281     }
4282   %}
4283 
  // Materialize a pointer constant into dst.  Dispatches on the
  // constant's relocation type: oops and metadata need relocation
  // records; plain addresses are loaded either directly (small values)
  // or page-relative via adrp + add.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // NULL and the special value 1 are matched by the dedicated
      // immP0 / immP_1 operands, so they must never reach this encoding.
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        // Embedded oop: emit with an oop relocation so the GC can find it.
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        // Embedded Metadata* (e.g. a Klass*): needs a metadata relocation.
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // Small address within the first page: a plain immediate move.
          __ mov(dst_reg, con);
        } else {
          // Page-relative addressing: adrp materializes the page, then
          // add supplies the intra-page offset.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4308 
4309   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4310     MacroAssembler _masm(&cbuf);
4311     Register dst_reg = as_Register($dst$$reg);
4312     __ mov(dst_reg, zr);
4313   %}
4314 
4315   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4316     MacroAssembler _masm(&cbuf);
4317     Register dst_reg = as_Register($dst$$reg);
4318     __ mov(dst_reg, (u_int64_t)1);
4319   %}
4320 
4321   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
4322     MacroAssembler _masm(&cbuf);
4323     address page = (address)$src$$constant;
4324     Register dst_reg = as_Register($dst$$reg);
4325     unsigned long off;
4326     __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
4327     assert(off == 0, "assumed offset == 0");
4328   %}
4329 
  // Materialize the card-table byte map base address into dst; the
  // MacroAssembler helper chooses the best instruction sequence.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4334 
4335   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
4336     MacroAssembler _masm(&cbuf);
4337     Register dst_reg = as_Register($dst$$reg);
4338     address con = (address)$src$$constant;
4339     if (con == NULL) {
4340       ShouldNotReachHere();
4341     } else {
4342       relocInfo::relocType rtype = $src->constant_reloc();
4343       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
4344       __ set_narrow_oop(dst_reg, (jobject)con);
4345     }
4346   %}
4347 
4348   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4349     MacroAssembler _masm(&cbuf);
4350     Register dst_reg = as_Register($dst$$reg);
4351     __ mov(dst_reg, zr);
4352   %}
4353 
4354   enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
4355     MacroAssembler _masm(&cbuf);
4356     Register dst_reg = as_Register($dst$$reg);
4357     address con = (address)$src$$constant;
4358     if (con == NULL) {
4359       ShouldNotReachHere();
4360     } else {
4361       relocInfo::relocType rtype = $src->constant_reloc();
4362       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
4363       __ set_narrow_klass(dst_reg, (Klass *)con);
4364     }
4365   %}
4366 
4367   // arithmetic encodings
4368 
4369   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
4370     MacroAssembler _masm(&cbuf);
4371     Register dst_reg = as_Register($dst$$reg);
4372     Register src_reg = as_Register($src1$$reg);
4373     int32_t con = (int32_t)$src2$$constant;
4374     // add has primary == 0, subtract has primary == 1
4375     if ($primary) { con = -con; }
4376     if (con < 0) {
4377       __ subw(dst_reg, src_reg, -con);
4378     } else {
4379       __ addw(dst_reg, src_reg, con);
4380     }
4381   %}
4382 
4383   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
4384     MacroAssembler _masm(&cbuf);
4385     Register dst_reg = as_Register($dst$$reg);
4386     Register src_reg = as_Register($src1$$reg);
4387     int32_t con = (int32_t)$src2$$constant;
4388     // add has primary == 0, subtract has primary == 1
4389     if ($primary) { con = -con; }
4390     if (con < 0) {
4391       __ sub(dst_reg, src_reg, -con);
4392     } else {
4393       __ add(dst_reg, src_reg, con);
4394     }
4395   %}
4396 
4397   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4398     MacroAssembler _masm(&cbuf);
4399    Register dst_reg = as_Register($dst$$reg);
4400    Register src1_reg = as_Register($src1$$reg);
4401    Register src2_reg = as_Register($src2$$reg);
4402     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4403   %}
4404 
4405   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4406     MacroAssembler _masm(&cbuf);
4407    Register dst_reg = as_Register($dst$$reg);
4408    Register src1_reg = as_Register($src1$$reg);
4409    Register src2_reg = as_Register($src2$$reg);
4410     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4411   %}
4412 
4413   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4414     MacroAssembler _masm(&cbuf);
4415    Register dst_reg = as_Register($dst$$reg);
4416    Register src1_reg = as_Register($src1$$reg);
4417    Register src2_reg = as_Register($src2$$reg);
4418     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4419   %}
4420 
4421   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4422     MacroAssembler _masm(&cbuf);
4423    Register dst_reg = as_Register($dst$$reg);
4424    Register src1_reg = as_Register($src1$$reg);
4425    Register src2_reg = as_Register($src2$$reg);
4426     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4427   %}
4428 
4429   // compare instruction encodings
4430 
4431   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4432     MacroAssembler _masm(&cbuf);
4433     Register reg1 = as_Register($src1$$reg);
4434     Register reg2 = as_Register($src2$$reg);
4435     __ cmpw(reg1, reg2);
4436   %}
4437 
4438   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4439     MacroAssembler _masm(&cbuf);
4440     Register reg = as_Register($src1$$reg);
4441     int32_t val = $src2$$constant;
4442     if (val >= 0) {
4443       __ subsw(zr, reg, val);
4444     } else {
4445       __ addsw(zr, reg, -val);
4446     }
4447   %}
4448 
4449   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4450     MacroAssembler _masm(&cbuf);
4451     Register reg1 = as_Register($src1$$reg);
4452     u_int32_t val = (u_int32_t)$src2$$constant;
4453     __ movw(rscratch1, val);
4454     __ cmpw(reg1, rscratch1);
4455   %}
4456 
4457   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4458     MacroAssembler _masm(&cbuf);
4459     Register reg1 = as_Register($src1$$reg);
4460     Register reg2 = as_Register($src2$$reg);
4461     __ cmp(reg1, reg2);
4462   %}
4463 
4464   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
4465     MacroAssembler _masm(&cbuf);
4466     Register reg = as_Register($src1$$reg);
4467     int64_t val = $src2$$constant;
4468     if (val >= 0) {
4469       __ subs(zr, reg, val);
4470     } else if (val != -val) {
4471       __ adds(zr, reg, -val);
4472     } else {
4473     // aargh, Long.MIN_VALUE is a special case
4474       __ orr(rscratch1, zr, (u_int64_t)val);
4475       __ subs(zr, reg, rscratch1);
4476     }
4477   %}
4478 
4479   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4480     MacroAssembler _masm(&cbuf);
4481     Register reg1 = as_Register($src1$$reg);
4482     u_int64_t val = (u_int64_t)$src2$$constant;
4483     __ mov(rscratch1, val);
4484     __ cmp(reg1, rscratch1);
4485   %}
4486 
4487   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4488     MacroAssembler _masm(&cbuf);
4489     Register reg1 = as_Register($src1$$reg);
4490     Register reg2 = as_Register($src2$$reg);
4491     __ cmp(reg1, reg2);
4492   %}
4493 
4494   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4495     MacroAssembler _masm(&cbuf);
4496     Register reg1 = as_Register($src1$$reg);
4497     Register reg2 = as_Register($src2$$reg);
4498     __ cmpw(reg1, reg2);
4499   %}
4500 
4501   enc_class aarch64_enc_testp(iRegP src) %{
4502     MacroAssembler _masm(&cbuf);
4503     Register reg = as_Register($src$$reg);
4504     __ cmp(reg, zr);
4505   %}
4506 
4507   enc_class aarch64_enc_testn(iRegN src) %{
4508     MacroAssembler _masm(&cbuf);
4509     Register reg = as_Register($src$$reg);
4510     __ cmpw(reg, zr);
4511   %}
4512 
4513   enc_class aarch64_enc_b(label lbl) %{
4514     MacroAssembler _masm(&cbuf);
4515     Label *L = $lbl$$label;
4516     __ b(*L);
4517   %}
4518 
4519   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4520     MacroAssembler _masm(&cbuf);
4521     Label *L = $lbl$$label;
4522     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4523   %}
4524 
4525   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4526     MacroAssembler _masm(&cbuf);
4527     Label *L = $lbl$$label;
4528     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4529   %}
4530 
  // Partial (slow-path) subtype check: is sub_reg a subtype of super_reg?
  // Delegates to MacroAssembler::check_klass_subtype_slow_path with a
  // miss label and set_cond_codes == true, so callers can branch on the
  // resulting flags.  When primary is set, result_reg is zeroed on the
  // success (fall-through) path before 'miss' is bound.
  // NOTE(review): exact success/failure register and flag contract is
  // defined by check_klass_subtype_slow_path -- confirm against
  // macroAssembler_aarch64 before relying on it.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4548 
  // Emit a Java static call (or a call to a runtime wrapper when _method
  // is null).  Calls go through a trampoline so the target can be
  // anywhere in the address space; static calls additionally get a
  // to-interpreter stub.  On CodeCache exhaustion the compile is failed
  // via ciEnv::record_failure and we bail out early.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      // Optimized-virtual and static calls use different relocation specs
      // so the code patcher can tell them apart.
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    // trampoline_call returns NULL when it could not allocate a
    // trampoline stub -- treat as CodeCache exhaustion.
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4575 
  // Emit a Java dynamic (inline-cache) call.  ic_call emits the IC
  // holder load plus the call; a NULL return means the CodeCache is
  // full, in which case the compile is failed.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4585 
  // Post-call epilogue.  Stack-depth verification is not implemented on
  // AArch64, so with -XX:+VerifyStackAtCalls we deliberately trap.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4593 
  // Call from compiled Java code into the runtime.
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target is inside the CodeCache: reachable via a trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Arbitrary (C) runtime target: use blrt with the register/FP
      // argument counts and return type from the call's TypeFunc.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      // The pair {zr, return-address} is pushed below sp and popped
      // again after the call returns.
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4624 
  // Jump to the shared exception-rethrow stub; far_jump handles targets
  // beyond the range of a plain branch.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4629 
  // Method return: branch to the address in the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4634 
4635   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4636     MacroAssembler _masm(&cbuf);
4637     Register target_reg = as_Register($jump_target$$reg);
4638     __ br(target_reg);
4639   %}
4640 
4641   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
4642     MacroAssembler _masm(&cbuf);
4643     Register target_reg = as_Register($jump_target$$reg);
4644     // exception oop should be in r0
4645     // ret addr has been popped into lr
4646     // callee expects it in r3
4647     __ mov(r3, lr);
4648     __ br(target_reg);
4649   %}
4650 
  // Fast-path monitor enter for C2 FastLock nodes.
  // On exit the condition flags encode the outcome:
  //   EQ => lock acquired on the fast path
  //   NE => must fall back to the runtime slow path
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop is never null here, so this compare forces NE (slow path).
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE path: single CASAL instruction.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // Exclusive-monitor path: ldaxr/stlxr retry loop.
      Label retry_load;
      __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // stlxr writes 0 on success; flags here are still EQ from the cmp.
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and have now locked it; we continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If the mark minus sp is within the current page (and lock bits are
    // clear) the owner is this thread, so we can store 0 as the displaced
    // header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4803 
4804   // TODO
4805   // reimplement this with custom cmpxchgptr code
4806   // which avoids some of the unnecessary branching
  // Fast-path monitor exit for C2 FastUnlock nodes.
  // On exit the condition flags encode the outcome:
  //   EQ => lock released on the fast path
  //   NE => must fall back to the runtime slow path
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // LSE path: CAS the mark from box back to the displaced header.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // Exclusive-monitor path: ldxr/stlxr retry loop.
        Label retry_load;
        __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // stlxr writes 0 on success; flags here are still EQ from the cmp.
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      // Not the owner, or there are recursions: take the slow path (NE).
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // cmp sets NE for the branch below; waiters force the slow path.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4901 
4902 %}
4903 
4904 //----------FRAME--------------------------------------------------------------
4905 // Definition of frame structure and management information.
4906 //
4907 //  S T A C K   L A Y O U T    Allocators stack-slot number
4908 //                             |   (to get allocators register number
4909 //  G  Owned by    |        |  v    add OptoReg::stack0())
4910 //  r   CALLER     |        |
4911 //  o     |        +--------+      pad to even-align allocators stack-slot
4912 //  w     V        |  pad0  |        numbers; owned by CALLER
4913 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4914 //  h     ^        |   in   |  5
4915 //        |        |  args  |  4   Holes in incoming args owned by SELF
4916 //  |     |        |        |  3
4917 //  |     |        +--------+
4918 //  V     |        | old out|      Empty on Intel, window on Sparc
4919 //        |    old |preserve|      Must be even aligned.
4920 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4921 //        |        |   in   |  3   area for Intel ret address
4922 //     Owned by    |preserve|      Empty on Sparc.
4923 //       SELF      +--------+
4924 //        |        |  pad2  |  2   pad to align old SP
4925 //        |        +--------+  1
4926 //        |        | locks  |  0
4927 //        |        +--------+----> OptoReg::stack0(), even aligned
4928 //        |        |  pad1  | 11   pad to align new SP
4929 //        |        +--------+
4930 //        |        |        | 10
4931 //        |        | spills |  9   spills
4932 //        V        |        |  8   (pad0 slot for callee)
4933 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4934 //        ^        |  out   |  7
4935 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4936 //     Owned by    +--------+
4937 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4938 //        |    new |preserve|      Must be even-aligned.
4939 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4940 //        |        |        |
4941 //
4942 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4943 //         known from SELF's arguments and the Java calling convention.
4944 //         Region 6-7 is determined per call site.
4945 // Note 2: If the calling convention leaves holes in the incoming argument
4946 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
4948 //         incoming area, as the Java calling convention is completely under
4949 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
4951 //         varargs C calling conventions.
4952 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4953 //         even aligned with pad0 as needed.
4954 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4955 //           (the latter is true on Intel but is it false on AArch64?)
4956 //         region 6-11 is even aligned; it may be padded out more so that
4957 //         the region from SP to FP meets the minimum stack alignment.
4958 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4959 //         alignment.  Region 11, pad1, may be dynamically extended so that
4960 //         SP meets the minimum alignment.
4961 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): register number 31 encodes SP in A64 -- confirm R31 here
  // is this file's name for the stack pointer, not a general register.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo[]/hi[] give the first/second OptoReg holding a return value of
    // each ideal register type; OptoReg::Bad in hi[] marks a value that
    // occupies only a single register slot.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5065 
5066 //----------ATTRIBUTES---------------------------------------------------------
5067 //----------Operand Attributes-------------------------------------------------
5068 op_attrib op_cost(1);        // Required cost attribute
5069 
5070 //----------Instruction Attributes---------------------------------------------
5071 ins_attrib ins_cost(INSN_COST); // Required cost attribute
5072 ins_attrib ins_size(32);        // Required size attribute (in bits)
5073 ins_attrib ins_short_branch(0); // Required flag: is this instruction
5074                                 // a non-matching short branch variant
5075                                 // of some long branch?
5076 ins_attrib ins_alignment(4);    // Required alignment attribute (must
5077                                 // be a power of 2) specifies the
5078                                 // alignment that some part of the
5079                                 // instruction (not necessarily the
5080                                 // start) requires.  If > 1, a
5081                                 // compute_padding() function must be
5082                                 // provided for the instruction
5083 
5084 //----------OPERANDS-----------------------------------------------------------
5085 // Operand definitions must precede instruction definitions for correct parsing
5086 // in the ADLC because operands constitute user defined types which are used in
5087 // instruction definitions.
5088 
5089 //----------Simple Operands----------------------------------------------------
5090 
5091 // Integer operands 32 bit
5092 // 32 bit immediate
5093 operand immI()
5094 %{
5095   match(ConI);
5096 
5097   op_cost(0);
5098   format %{ %}
5099   interface(CONST_INTER);
5100 %}
5101 
5102 // 32 bit zero
5103 operand immI0()
5104 %{
5105   predicate(n->get_int() == 0);
5106   match(ConI);
5107 
5108   op_cost(0);
5109   format %{ %}
5110   interface(CONST_INTER);
5111 %}
5112 
5113 // 32 bit unit increment
5114 operand immI_1()
5115 %{
5116   predicate(n->get_int() == 1);
5117   match(ConI);
5118 
5119   op_cost(0);
5120   format %{ %}
5121   interface(CONST_INTER);
5122 %}
5123 
5124 // 32 bit unit decrement
5125 operand immI_M1()
5126 %{
5127   predicate(n->get_int() == -1);
5128   match(ConI);
5129 
5130   op_cost(0);
5131   format %{ %}
5132   interface(CONST_INTER);
5133 %}
5134 
// 32 bit integer no greater than 4
// (note: the predicate also accepts all negative values)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5144 
5145 operand immI_31()
5146 %{
5147   predicate(n->get_int() == 31);
5148   match(ConI);
5149 
5150   op_cost(0);
5151   format %{ %}
5152   interface(CONST_INTER);
5153 %}
5154 
5155 operand immI_8()
5156 %{
5157   predicate(n->get_int() == 8);
5158   match(ConI);
5159 
5160   op_cost(0);
5161   format %{ %}
5162   interface(CONST_INTER);
5163 %}
5164 
5165 operand immI_16()
5166 %{
5167   predicate(n->get_int() == 16);
5168   match(ConI);
5169 
5170   op_cost(0);
5171   format %{ %}
5172   interface(CONST_INTER);
5173 %}
5174 
5175 operand immI_24()
5176 %{
5177   predicate(n->get_int() == 24);
5178   match(ConI);
5179 
5180   op_cost(0);
5181   format %{ %}
5182   interface(CONST_INTER);
5183 %}
5184 
5185 operand immI_32()
5186 %{
5187   predicate(n->get_int() == 32);
5188   match(ConI);
5189 
5190   op_cost(0);
5191   format %{ %}
5192   interface(CONST_INTER);
5193 %}
5194 
5195 operand immI_48()
5196 %{
5197   predicate(n->get_int() == 48);
5198   match(ConI);
5199 
5200   op_cost(0);
5201   format %{ %}
5202   interface(CONST_INTER);
5203 %}
5204 
5205 operand immI_56()
5206 %{
5207   predicate(n->get_int() == 56);
5208   match(ConI);
5209 
5210   op_cost(0);
5211   format %{ %}
5212   interface(CONST_INTER);
5213 %}
5214 
5215 operand immI_64()
5216 %{
5217   predicate(n->get_int() == 64);
5218   match(ConI);
5219 
5220   op_cost(0);
5221   format %{ %}
5222   interface(CONST_INTER);
5223 %}
5224 
5225 operand immI_255()
5226 %{
5227   predicate(n->get_int() == 255);
5228   match(ConI);
5229 
5230   op_cost(0);
5231   format %{ %}
5232   interface(CONST_INTER);
5233 %}
5234 
5235 operand immI_65535()
5236 %{
5237   predicate(n->get_int() == 65535);
5238   match(ConI);
5239 
5240   op_cost(0);
5241   format %{ %}
5242   interface(CONST_INTER);
5243 %}
5244 
// Constant 63, used as a long shift-count mask.
// NOTE(review): despite the immL_ prefix this matches a 32-bit ConI and
// reads get_int() -- presumably because shift counts appear as int
// constants in the ideal graph.  Confirm before "fixing" to ConL/get_long
// (contrast with immL_65535 below, which does use ConL).
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5254 
// Constant 255.
// NOTE(review): like immL_63, this matches ConI/get_int() despite the
// immL_ prefix, while immL_65535 matches ConL/get_long().  Verify which
// ideal-graph constant the rules using this operand actually see.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5264 
5265 operand immL_65535()
5266 %{
5267   predicate(n->get_long() == 65535L);
5268   match(ConL);
5269 
5270   op_cost(0);
5271   format %{ %}
5272   interface(CONST_INTER);
5273 %}
5274 
5275 operand immL_4294967295()
5276 %{
5277   predicate(n->get_long() == 4294967295L);
5278   match(ConL);
5279 
5280   op_cost(0);
5281   format %{ %}
5282   interface(CONST_INTER);
5283 %}
5284 
// Long mask of contiguous low-order one bits, i.e. a value of the form
// 2^k - 1, with the top two bits clear (so k <= 62).  Used to match
// bitfield-extract style patterns.
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5295 
// Int mask of contiguous low-order one bits, i.e. a value of the form
// 2^k - 1, with the top two bits clear (so k <= 30).  Used to match
// bitfield-extract style patterns.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5306 
5307 // Scale values for scaled offset addressing modes (up to long but not quad)
5308 operand immIScale()
5309 %{
5310   predicate(0 <= n->get_int() && (n->get_int() <= 3));
5311   match(ConI);
5312 
5313   op_cost(0);
5314   format %{ %}
5315   interface(CONST_INTER);
5316 %}
5317 
5318 // 26 bit signed offset -- for pc-relative branches
5319 operand immI26()
5320 %{
5321   predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
5322   match(ConI);
5323 
5324   op_cost(0);
5325   format %{ %}
5326   interface(CONST_INTER);
5327 %}
5328 
5329 // 19 bit signed offset -- for pc-relative loads
5330 operand immI19()
5331 %{
5332   predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
5333   match(ConI);
5334 
5335   op_cost(0);
5336   format %{ %}
5337   interface(CONST_INTER);
5338 %}
5339 
5340 // 12 bit unsigned offset -- for base plus immediate loads
5341 operand immIU12()
5342 %{
5343   predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
5344   match(ConI);
5345 
5346   op_cost(0);
5347   format %{ %}
5348   interface(CONST_INTER);
5349 %}
5350 
5351 operand immLU12()
5352 %{
5353   predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
5354   match(ConL);
5355 
5356   op_cost(0);
5357   format %{ %}
5358   interface(CONST_INTER);
5359 %}
5360 
5361 // Offset for scaled or unscaled immediate loads and stores
5362 operand immIOffset()
5363 %{
5364   predicate(Address::offset_ok_for_immed(n->get_int()));
5365   match(ConI);
5366 
5367   op_cost(0);
5368   format %{ %}
5369   interface(CONST_INTER);
5370 %}
5371 
5372 operand immLoffset()
5373 %{
5374   predicate(Address::offset_ok_for_immed(n->get_long()));
5375   match(ConL);
5376 
5377   op_cost(0);
5378   format %{ %}
5379   interface(CONST_INTER);
5380 %}
5381 
5382 // 32 bit integer valid for add sub immediate
5383 operand immIAddSub()
5384 %{
5385   predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
5386   match(ConI);
5387   op_cost(0);
5388   format %{ %}
5389   interface(CONST_INTER);
5390 %}
5391 
5392 // 32 bit unsigned integer valid for logical immediate
5393 // TODO -- check this is right when e.g the mask is 0x80000000
5394 operand immILog()
5395 %{
5396   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
5397   match(ConI);
5398 
5399   op_cost(0);
5400   format %{ %}
5401   interface(CONST_INTER);
5402 %}
5403 
5404 // Integer operands 64 bit
5405 // 64 bit immediate
5406 operand immL()
5407 %{
5408   match(ConL);
5409 
5410   op_cost(0);
5411   format %{ %}
5412   interface(CONST_INTER);
5413 %}
5414 
5415 // 64 bit zero
5416 operand immL0()
5417 %{
5418   predicate(n->get_long() == 0);
5419   match(ConL);
5420 
5421   op_cost(0);
5422   format %{ %}
5423   interface(CONST_INTER);
5424 %}
5425 
5426 // 64 bit unit increment
5427 operand immL_1()
5428 %{
5429   predicate(n->get_long() == 1);
5430   match(ConL);
5431 
5432   op_cost(0);
5433   format %{ %}
5434   interface(CONST_INTER);
5435 %}
5436 
5437 // 64 bit unit decrement
5438 operand immL_M1()
5439 %{
5440   predicate(n->get_long() == -1);
5441   match(ConL);
5442 
5443   op_cost(0);
5444   format %{ %}
5445   interface(CONST_INTER);
5446 %}
5447 
5448 // 32 bit offset of pc in thread anchor
5449 
5450 operand immL_pc_off()
5451 %{
5452   predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
5453                              in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
5454   match(ConL);
5455 
5456   op_cost(0);
5457   format %{ %}
5458   interface(CONST_INTER);
5459 %}
5460 
5461 // 64 bit integer valid for add sub immediate
5462 operand immLAddSub()
5463 %{
5464   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
5465   match(ConL);
5466   op_cost(0);
5467   format %{ %}
5468   interface(CONST_INTER);
5469 %}
5470 
5471 // 64 bit integer valid for logical immediate
5472 operand immLLog()
5473 %{
5474   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
5475   match(ConL);
5476   op_cost(0);
5477   format %{ %}
5478   interface(CONST_INTER);
5479 %}
5480 
5481 // Long Immediate: low 32-bit mask
5482 operand immL_32bits()
5483 %{
5484   predicate(n->get_long() == 0xFFFFFFFFL);
5485   match(ConL);
5486   op_cost(0);
5487   format %{ %}
5488   interface(CONST_INTER);
5489 %}
5490 
5491 // Pointer operands
5492 // Pointer Immediate
5493 operand immP()
5494 %{
5495   match(ConP);
5496 
5497   op_cost(0);
5498   format %{ %}
5499   interface(CONST_INTER);
5500 %}
5501 
5502 // NULL Pointer Immediate
5503 operand immP0()
5504 %{
5505   predicate(n->get_ptr() == 0);
5506   match(ConP);
5507 
5508   op_cost(0);
5509   format %{ %}
5510   interface(CONST_INTER);
5511 %}
5512 
5513 // Pointer Immediate One
5514 // this is used in object initialization (initial object header)
5515 operand immP_1()
5516 %{
5517   predicate(n->get_ptr() == 1);
5518   match(ConP);
5519 
5520   op_cost(0);
5521   format %{ %}
5522   interface(CONST_INTER);
5523 %}
5524 
5525 // Polling Page Pointer Immediate
5526 operand immPollPage()
5527 %{
5528   predicate((address)n->get_ptr() == os::get_polling_page());
5529   match(ConP);
5530 
5531   op_cost(0);
5532   format %{ %}
5533   interface(CONST_INTER);
5534 %}
5535 
5536 // Card Table Byte Map Base
5537 operand immByteMapBase()
5538 %{
5539   // Get base of card map
5540   predicate((jbyte*)n->get_ptr() ==
5541         ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
5542   match(ConP);
5543 
5544   op_cost(0);
5545   format %{ %}
5546   interface(CONST_INTER);
5547 %}
5548 
5549 // Pointer Immediate Minus One
5550 // this is used when we want to write the current PC to the thread anchor
5551 operand immP_M1()
5552 %{
5553   predicate(n->get_ptr() == -1);
5554   match(ConP);
5555 
5556   op_cost(0);
5557   format %{ %}
5558   interface(CONST_INTER);
5559 %}
5560 
5561 // Pointer Immediate Minus Two
5562 // this is used when we want to write the current PC to the thread anchor
5563 operand immP_M2()
5564 %{
5565   predicate(n->get_ptr() == -2);
5566   match(ConP);
5567 
5568   op_cost(0);
5569   format %{ %}
5570   interface(CONST_INTER);
5571 %}
5572 
5573 // Float and Double operands
5574 // Double Immediate
5575 operand immD()
5576 %{
5577   match(ConD);
5578   op_cost(0);
5579   format %{ %}
5580   interface(CONST_INTER);
5581 %}
5582 
5583 // Double Immediate: +0.0d
5584 operand immD0()
5585 %{
5586   predicate(jlong_cast(n->getd()) == 0);
5587   match(ConD);
5588 
5589   op_cost(0);
5590   format %{ %}
5591   interface(CONST_INTER);
5592 %}
5593 
// Double immediate representable in the FP immediate (packed) encoding
// accepted by Assembler::operand_valid_for_float_immediate.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5603 
5604 // Float Immediate
5605 operand immF()
5606 %{
5607   match(ConF);
5608   op_cost(0);
5609   format %{ %}
5610   interface(CONST_INTER);
5611 %}
5612 
5613 // Float Immediate: +0.0f.
5614 operand immF0()
5615 %{
5616   predicate(jint_cast(n->getf()) == 0);
5617   match(ConF);
5618 
5619   op_cost(0);
5620   format %{ %}
5621   interface(CONST_INTER);
5622 %}
5623 
5624 //
5625 operand immFPacked()
5626 %{
5627   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
5628   match(ConF);
5629   op_cost(0);
5630   format %{ %}
5631   interface(CONST_INTER);
5632 %}
5633 
5634 // Narrow pointer operands
5635 // Narrow Pointer Immediate
5636 operand immN()
5637 %{
5638   match(ConN);
5639 
5640   op_cost(0);
5641   format %{ %}
5642   interface(CONST_INTER);
5643 %}
5644 
5645 // Narrow NULL Pointer Immediate
5646 operand immN0()
5647 %{
5648   predicate(n->get_narrowcon() == 0);
5649   match(ConN);
5650 
5651   op_cost(0);
5652   format %{ %}
5653   interface(CONST_INTER);
5654 %}
5655 
5656 operand immNKlass()
5657 %{
5658   match(ConNKlass);
5659 
5660   op_cost(0);
5661   format %{ %}
5662   interface(CONST_INTER);
5663 %}
5664 
5665 // Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
5667 operand iRegI()
5668 %{
5669   constraint(ALLOC_IN_RC(any_reg32));
5670   match(RegI);
5671   match(iRegINoSp);
5672   op_cost(0);
5673   format %{ %}
5674   interface(REG_INTER);
5675 %}
5676 
5677 // Integer 32 bit Register not Special
5678 operand iRegINoSp()
5679 %{
5680   constraint(ALLOC_IN_RC(no_special_reg32));
5681   match(RegI);
5682   op_cost(0);
5683   format %{ %}
5684   interface(REG_INTER);
5685 %}
5686 
5687 // Integer 64 bit Register Operands
5688 // Integer 64 bit Register (includes SP)
5689 operand iRegL()
5690 %{
5691   constraint(ALLOC_IN_RC(any_reg));
5692   match(RegL);
5693   match(iRegLNoSp);
5694   op_cost(0);
5695   format %{ %}
5696   interface(REG_INTER);
5697 %}
5698 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  // Explicit zero cost, consistent with iRegINoSp/iRegPNoSp/iRegNNoSp.
  // Without this line the operand inherits the default op_cost(1) from
  // the op_attrib declaration, making it look more expensive than its
  // sibling register operands for no apparent reason.
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5707 
5708 // Pointer Register Operands
5709 // Pointer Register
5710 operand iRegP()
5711 %{
5712   constraint(ALLOC_IN_RC(ptr_reg));
5713   match(RegP);
5714   match(iRegPNoSp);
5715   match(iRegP_R0);
5716   //match(iRegP_R2);
5717   //match(iRegP_R4);
5718   //match(iRegP_R5);
5719   match(thread_RegP);
5720   op_cost(0);
5721   format %{ %}
5722   interface(REG_INTER);
5723 %}
5724 
5725 // Pointer 64 bit Register not Special
5726 operand iRegPNoSp()
5727 %{
5728   constraint(ALLOC_IN_RC(no_special_ptr_reg));
5729   match(RegP);
5730   // match(iRegP);
5731   // match(iRegP_R0);
5732   // match(iRegP_R2);
5733   // match(iRegP_R4);
5734   // match(iRegP_R5);
5735   // match(thread_RegP);
5736   op_cost(0);
5737   format %{ %}
5738   interface(REG_INTER);
5739 %}
5740 
5741 // Pointer 64 bit Register R0 only
5742 operand iRegP_R0()
5743 %{
5744   constraint(ALLOC_IN_RC(r0_reg));
5745   match(RegP);
5746   // match(iRegP);
5747   match(iRegPNoSp);
5748   op_cost(0);
5749   format %{ %}
5750   interface(REG_INTER);
5751 %}
5752 
5753 // Pointer 64 bit Register R1 only
5754 operand iRegP_R1()
5755 %{
5756   constraint(ALLOC_IN_RC(r1_reg));
5757   match(RegP);
5758   // match(iRegP);
5759   match(iRegPNoSp);
5760   op_cost(0);
5761   format %{ %}
5762   interface(REG_INTER);
5763 %}
5764 
5765 // Pointer 64 bit Register R2 only
5766 operand iRegP_R2()
5767 %{
5768   constraint(ALLOC_IN_RC(r2_reg));
5769   match(RegP);
5770   // match(iRegP);
5771   match(iRegPNoSp);
5772   op_cost(0);
5773   format %{ %}
5774   interface(REG_INTER);
5775 %}
5776 
5777 // Pointer 64 bit Register R3 only
5778 operand iRegP_R3()
5779 %{
5780   constraint(ALLOC_IN_RC(r3_reg));
5781   match(RegP);
5782   // match(iRegP);
5783   match(iRegPNoSp);
5784   op_cost(0);
5785   format %{ %}
5786   interface(REG_INTER);
5787 %}
5788 
5789 // Pointer 64 bit Register R4 only
5790 operand iRegP_R4()
5791 %{
5792   constraint(ALLOC_IN_RC(r4_reg));
5793   match(RegP);
5794   // match(iRegP);
5795   match(iRegPNoSp);
5796   op_cost(0);
5797   format %{ %}
5798   interface(REG_INTER);
5799 %}
5800 
5801 // Pointer 64 bit Register R5 only
5802 operand iRegP_R5()
5803 %{
5804   constraint(ALLOC_IN_RC(r5_reg));
5805   match(RegP);
5806   // match(iRegP);
5807   match(iRegPNoSp);
5808   op_cost(0);
5809   format %{ %}
5810   interface(REG_INTER);
5811 %}
5812 
5813 // Pointer 64 bit Register R10 only
5814 operand iRegP_R10()
5815 %{
5816   constraint(ALLOC_IN_RC(r10_reg));
5817   match(RegP);
5818   // match(iRegP);
5819   match(iRegPNoSp);
5820   op_cost(0);
5821   format %{ %}
5822   interface(REG_INTER);
5823 %}
5824 
5825 // Long 64 bit Register R11 only
5826 operand iRegL_R11()
5827 %{
5828   constraint(ALLOC_IN_RC(r11_reg));
5829   match(RegL);
5830   match(iRegLNoSp);
5831   op_cost(0);
5832   format %{ %}
5833   interface(REG_INTER);
5834 %}
5835 
5836 // Pointer 64 bit Register FP only
5837 operand iRegP_FP()
5838 %{
5839   constraint(ALLOC_IN_RC(fp_reg));
5840   match(RegP);
5841   // match(iRegP);
5842   op_cost(0);
5843   format %{ %}
5844   interface(REG_INTER);
5845 %}
5846 
5847 // Register R0 only
5848 operand iRegI_R0()
5849 %{
5850   constraint(ALLOC_IN_RC(int_r0_reg));
5851   match(RegI);
5852   match(iRegINoSp);
5853   op_cost(0);
5854   format %{ %}
5855   interface(REG_INTER);
5856 %}
5857 
5858 // Register R2 only
5859 operand iRegI_R2()
5860 %{
5861   constraint(ALLOC_IN_RC(int_r2_reg));
5862   match(RegI);
5863   match(iRegINoSp);
5864   op_cost(0);
5865   format %{ %}
5866   interface(REG_INTER);
5867 %}
5868 
5869 // Register R3 only
5870 operand iRegI_R3()
5871 %{
5872   constraint(ALLOC_IN_RC(int_r3_reg));
5873   match(RegI);
5874   match(iRegINoSp);
5875   op_cost(0);
5876   format %{ %}
5877   interface(REG_INTER);
5878 %}
5879 
5880 
// Register R4 only
5882 operand iRegI_R4()
5883 %{
5884   constraint(ALLOC_IN_RC(int_r4_reg));
5885   match(RegI);
5886   match(iRegINoSp);
5887   op_cost(0);
5888   format %{ %}
5889   interface(REG_INTER);
5890 %}
5891 
5892 
// Narrow Pointer Register Operands
5894 // Narrow Pointer Register
5895 operand iRegN()
5896 %{
5897   constraint(ALLOC_IN_RC(any_reg32));
5898   match(RegN);
5899   match(iRegNNoSp);
5900   op_cost(0);
5901   format %{ %}
5902   interface(REG_INTER);
5903 %}
5904 
// Narrow Pointer Register not Special
5906 operand iRegNNoSp()
5907 %{
5908   constraint(ALLOC_IN_RC(no_special_reg32));
5909   match(RegN);
5910   op_cost(0);
5911   format %{ %}
5912   interface(REG_INTER);
5913 %}
5914 
5915 // heap base register -- used for encoding immN0
5916 
5917 operand iRegIHeapbase()
5918 %{
5919   constraint(ALLOC_IN_RC(heapbase_reg));
5920   match(RegI);
5921   op_cost(0);
5922   format %{ %}
5923   interface(REG_INTER);
5924 %}
5925 
5926 // Float Register
5927 // Float register operands
5928 operand vRegF()
5929 %{
5930   constraint(ALLOC_IN_RC(float_reg));
5931   match(RegF);
5932 
5933   op_cost(0);
5934   format %{ %}
5935   interface(REG_INTER);
5936 %}
5937 
5938 // Double Register
5939 // Double register operands
5940 operand vRegD()
5941 %{
5942   constraint(ALLOC_IN_RC(double_reg));
5943   match(RegD);
5944 
5945   op_cost(0);
5946   format %{ %}
5947   interface(REG_INTER);
5948 %}
5949 
5950 operand vecD()
5951 %{
5952   constraint(ALLOC_IN_RC(vectord_reg));
5953   match(VecD);
5954 
5955   op_cost(0);
5956   format %{ %}
5957   interface(REG_INTER);
5958 %}
5959 
5960 operand vecX()
5961 %{
5962   constraint(ALLOC_IN_RC(vectorx_reg));
5963   match(VecX);
5964 
5965   op_cost(0);
5966   format %{ %}
5967   interface(REG_INTER);
5968 %}
5969 
5970 operand vRegD_V0()
5971 %{
5972   constraint(ALLOC_IN_RC(v0_reg));
5973   match(RegD);
5974   op_cost(0);
5975   format %{ %}
5976   interface(REG_INTER);
5977 %}
5978 
5979 operand vRegD_V1()
5980 %{
5981   constraint(ALLOC_IN_RC(v1_reg));
5982   match(RegD);
5983   op_cost(0);
5984   format %{ %}
5985   interface(REG_INTER);
5986 %}
5987 
5988 operand vRegD_V2()
5989 %{
5990   constraint(ALLOC_IN_RC(v2_reg));
5991   match(RegD);
5992   op_cost(0);
5993   format %{ %}
5994   interface(REG_INTER);
5995 %}
5996 
5997 operand vRegD_V3()
5998 %{
5999   constraint(ALLOC_IN_RC(v3_reg));
6000   match(RegD);
6001   op_cost(0);
6002   format %{ %}
6003   interface(REG_INTER);
6004 %}
6005 
6006 // Flags register, used as output of signed compare instructions
6007 
6008 // note that on AArch64 we also use this register as the output for
6009 // for floating point compare instructions (CmpF CmpD). this ensures
6010 // that ordered inequality tests use GT, GE, LT or LE none of which
6011 // pass through cases where the result is unordered i.e. one or both
6012 // inputs to the compare is a NaN. this means that the ideal code can
6013 // replace e.g. a GT with an LE and not end up capturing the NaN case
6014 // (where the comparison should always fail). EQ and NE tests are
6015 // always generated in ideal code so that unordered folds into the NE
6016 // case, matching the behaviour of AArch64 NE.
6017 //
6018 // This differs from x86 where the outputs of FP compares use a
6019 // special FP flags registers and where compares based on this
6020 // register are distinguished into ordered inequalities (cmpOpUCF) and
6021 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
6022 // to explicitly handle the unordered case in branches. x86 also has
6023 // to include extra CMoveX rules to accept a cmpOpUCF input.
6024 
6025 operand rFlagsReg()
6026 %{
6027   constraint(ALLOC_IN_RC(int_flags));
6028   match(RegFlags);
6029 
6030   op_cost(0);
6031   format %{ "RFLAGS" %}
6032   interface(REG_INTER);
6033 %}
6034 
6035 // Flags register, used as output of unsigned compare instructions
6036 operand rFlagsRegU()
6037 %{
6038   constraint(ALLOC_IN_RC(int_flags));
6039   match(RegFlags);
6040 
6041   op_cost(0);
6042   format %{ "RFLAGSU" %}
6043   interface(REG_INTER);
6044 %}
6045 
6046 // Special Registers
6047 
6048 // Method Register
6049 operand inline_cache_RegP(iRegP reg)
6050 %{
6051   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
6052   match(reg);
6053   match(iRegPNoSp);
6054   op_cost(0);
6055   format %{ %}
6056   interface(REG_INTER);
6057 %}
6058 
6059 operand interpreter_method_oop_RegP(iRegP reg)
6060 %{
6061   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
6062   match(reg);
6063   match(iRegPNoSp);
6064   op_cost(0);
6065   format %{ %}
6066   interface(REG_INTER);
6067 %}
6068 
6069 // Thread Register
6070 operand thread_RegP(iRegP reg)
6071 %{
6072   constraint(ALLOC_IN_RC(thread_reg)); // link_reg
6073   match(reg);
6074   op_cost(0);
6075   format %{ %}
6076   interface(REG_INTER);
6077 %}
6078 
6079 operand lr_RegP(iRegP reg)
6080 %{
6081   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
6082   match(reg);
6083   op_cost(0);
6084   format %{ %}
6085   interface(REG_INTER);
6086 %}
6087 
6088 //----------Memory Operands----------------------------------------------------
6089 
6090 operand indirect(iRegP reg)
6091 %{
6092   constraint(ALLOC_IN_RC(ptr_reg));
6093   match(reg);
6094   op_cost(0);
6095   format %{ "[$reg]" %}
6096   interface(MEMORY_INTER) %{
6097     base($reg);
6098     index(0xffffffff);
6099     scale(0x0);
6100     disp(0x0);
6101   %}
6102 %}
6103 
6104 operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
6105 %{
6106   constraint(ALLOC_IN_RC(ptr_reg));
6107   match(AddP (AddP reg (LShiftL lreg scale)) off);
6108   op_cost(INSN_COST);
6109   format %{ "$reg, $lreg lsl($scale), $off" %}
6110   interface(MEMORY_INTER) %{
6111     base($reg);
6112     index($lreg);
6113     scale($scale);
6114     disp($off);
6115   %}
6116 %}
6117 
6118 operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
6119 %{
6120   constraint(ALLOC_IN_RC(ptr_reg));
6121   match(AddP (AddP reg (LShiftL lreg scale)) off);
6122   op_cost(INSN_COST);
6123   format %{ "$reg, $lreg lsl($scale), $off" %}
6124   interface(MEMORY_INTER) %{
6125     base($reg);
6126     index($lreg);
6127     scale($scale);
6128     disp($off);
6129   %}
6130 %}
6131 
6132 operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
6133 %{
6134   constraint(ALLOC_IN_RC(ptr_reg));
6135   match(AddP (AddP reg (ConvI2L ireg)) off);
6136   op_cost(INSN_COST);
6137   format %{ "$reg, $ireg, $off I2L" %}
6138   interface(MEMORY_INTER) %{
6139     base($reg);
6140     index($ireg);
6141     scale(0x0);
6142     disp($off);
6143   %}
6144 %}
6145 
6146 operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
6147 %{
6148   constraint(ALLOC_IN_RC(ptr_reg));
6149   match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
6150   op_cost(INSN_COST);
6151   format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
6152   interface(MEMORY_INTER) %{
6153     base($reg);
6154     index($ireg);
6155     scale($scale);
6156     disp($off);
6157   %}
6158 %}
6159 
6160 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
6161 %{
6162   constraint(ALLOC_IN_RC(ptr_reg));
6163   match(AddP reg (LShiftL (ConvI2L ireg) scale));
6164   op_cost(0);
6165   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
6166   interface(MEMORY_INTER) %{
6167     base($reg);
6168     index($ireg);
6169     scale($scale);
6170     disp(0x0);
6171   %}
6172 %}
6173 
6174 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
6175 %{
6176   constraint(ALLOC_IN_RC(ptr_reg));
6177   match(AddP reg (LShiftL lreg scale));
6178   op_cost(0);
6179   format %{ "$reg, $lreg lsl($scale)" %}
6180   interface(MEMORY_INTER) %{
6181     base($reg);
6182     index($lreg);
6183     scale($scale);
6184     disp(0x0);
6185   %}
6186 %}
6187 
6188 operand indIndex(iRegP reg, iRegL lreg)
6189 %{
6190   constraint(ALLOC_IN_RC(ptr_reg));
6191   match(AddP reg lreg);
6192   op_cost(0);
6193   format %{ "$reg, $lreg" %}
6194   interface(MEMORY_INTER) %{
6195     base($reg);
6196     index($lreg);
6197     scale(0x0);
6198     disp(0x0);
6199   %}
6200 %}
6201 
6202 operand indOffI(iRegP reg, immIOffset off)
6203 %{
6204   constraint(ALLOC_IN_RC(ptr_reg));
6205   match(AddP reg off);
6206   op_cost(0);
6207   format %{ "[$reg, $off]" %}
6208   interface(MEMORY_INTER) %{
6209     base($reg);
6210     index(0xffffffff);
6211     scale(0x0);
6212     disp($off);
6213   %}
6214 %}
6215 
6216 operand indOffL(iRegP reg, immLoffset off)
6217 %{
6218   constraint(ALLOC_IN_RC(ptr_reg));
6219   match(AddP reg off);
6220   op_cost(0);
6221   format %{ "[$reg, $off]" %}
6222   interface(MEMORY_INTER) %{
6223     base($reg);
6224     index(0xffffffff);
6225     scale(0x0);
6226     disp($off);
6227   %}
6228 %}
6229 
6230 
// Memory operands whose base is a narrow oop (compressed pointer).
// Each form matches a (DecodeN reg) base and is guarded by
// Universe::narrow_oop_shift() == 0, i.e. the decode is a no-op and the
// narrow register can be used directly as the address base.

// Simple indirect through a narrow-oop base: [reg].
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + scaled long index + unsigned 12-bit int offset.
// NOTE(review): op_cost(0) here vs op_cost(INSN_COST) in the _LN variant
// below looks inconsistent — confirm which cost is intended.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + scaled long index + unsigned 12-bit long offset.
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + sign-extended int index + unsigned 12-bit long offset.
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + scaled, sign-extended int index + unsigned 12-bit long offset.
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + scaled, sign-extended int index, no offset.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + scaled long index, no offset.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + unscaled long index.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + 32-bit immediate offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + 64-bit immediate offset.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);
  %}
%}
6380 
6381 
6382 
6383 // AArch64 opto stubs need to write to the pc slot in the thread anchor
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff); // no index register
    scale(0x0);
    disp($off);   // fixed pc-slot offset within the thread anchor (immL_pc_off)
  %}
%}
6397 
6398 //----------Special Memory Operands--------------------------------------------
6399 // Stack Slot Operand - This operand is used for loading and storing temporary
6400 //                      values on the stack where a match requires a value to
6401 //                      flow through memory.
// Pointer-sized stack slot; the slot's stack offset is carried in $reg.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Int-sized stack slot.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Float-sized stack slot.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double-sized stack slot.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Long-sized stack slot.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6472 
6473 // Operands for expressing Control Flow
6474 // NOTE: Label is a predefined operand which should not be redefined in
6475 //       the AD file. It is generically handled within the ADLC.
6476 
6477 //----------Conditional Branch Operands----------------------------------------
6478 // Comparison Op  - This is the operation of the comparison, and is limited to
6479 //                  the following set of codes:
6480 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6481 //
6482 // Other attributes of the comparison, such as unsignedness, are specified
6483 // by the comparison instruction that sets a condition code flags register.
6484 // That result is represented by a flags operand whose subtype is appropriate
6485 // to the unsignedness (etc.) of the comparison.
6486 //
6487 // Later, the instruction which matches both the Comparison Op (a Bool) and
6488 // the flags (produced by the Cmp) specifies the coding of the comparison op
6489 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6490 
6491 // used for signed integral comparisons and fp comparisons
6492 
// Signed/FP comparison condition operand. The numeric values are the
// AArch64 condition-code encodings for the mnemonics shown.
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

// Same as cmpOp but maps less/greater to the unsigned condition codes
// (lo/hs/ls/hi instead of lt/ge/le/gt).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6528 
6529 // Special operand allowing long args to int ops to be truncated for free
6530 
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // register interface: the underlying long register is used directly
  interface(REG_INTER)
%}
6541 
// Address forms accepted by vector load/store rules (see the
// vload_reg_mem*/vstore_reg_mem* pipe classes below).
opclass vmem(indirect, indIndex, indOffI, indOffL);
6543 
6544 //----------OPERAND CLASSES----------------------------------------------------
6545 // Operand Classes are groups of operands that are used as to simplify
6546 // instruction definitions by not requiring the AD writer to specify
6547 // separate instructions for every form of operand when the
6548 // instruction accepts multiple operand types with the same basic
6549 // encoding and format. The classic case of this is memory operands.
6550 
6551 // memory is used to define read/write location for load/store
6552 // instruction defs. we can turn a memory op into an Address
6553 
// First line: plain pointer-base forms; second line: the matching
// narrow-oop (DecodeN base) forms.
opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
6556 
6557 
6558 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6559 // operations. it allows the src to be either an iRegI or a (ConvL2I
6560 // iRegL). in the latter case the l2i normally planted for a ConvL2I
6561 // can be elided because the 32-bit instruction will just employ the
6562 // lower 32 bits anyway.
6563 //
6564 // n.b. this does not elide all L2I conversions. if the truncated
6565 // value is consumed by more than one operation then the ConvL2I
6566 // cannot be bundled into the consuming nodes so an l2i gets planted
6567 // (actually a movw $dst $src) and the downstream instructions consume
6568 // the result of the l2i as an iRegI input. That's a shame since the
6569 // movw is actually redundant but its not too costly.
6570 
// Either a plain 32-bit int register or an elided l2i of a long register
// (see the note above).
opclass iRegIorL2I(iRegI, iRegL2I);
6572 
6573 //----------PIPELINE-----------------------------------------------------------
6574 // Rules which define the behavior of the target architectures pipeline.
6575 
6576 // For specific pipelines, eg A53, define the stages of that pipeline
6577 //pipe_desc(ISS, EX1, EX2, WR);
// Alias the A53-style stage names (issue, execute 1/2, writeback) onto the
// generic S0..S5 stages declared by pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6582 
6583 // Integer ALU reg operation
6584 pipeline %{
6585 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6598 
6599 // We don't use an actual pipeline model so don't care about resources
6600 // or description. we do use pipeline classes to introduce fixed
6601 // latencies
6602 
6603 //----------RESOURCES----------------------------------------------------------
6604 // Resources are the functional units available to the machine
6605 
// INS0/INS1 are the two issue slots (INS01 = either slot); ALU0/ALU1 the
// two integer ALUs; MAC multiply-accumulate; DIV divider; BRANCH branch
// unit; LDST load/store unit; NEON_FP the SIMD/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
6613 
6614 //----------PIPELINE DESCRIPTION-----------------------------------------------
6615 // Pipeline Description specifies the stages in the machine's pipeline
6616 
6617 // Define the pipeline as a generic 6 stage pipeline
// S0..S3 double as ISS/EX1/EX2/WR via the #defines above.
pipe_desc(S0, S1, S2, S3, S4, S5);
6619 
6620 //----------PIPELINE CLASSES---------------------------------------------------
6621 // Pipeline Classes describe the stages in which input and output are
6622 // referenced by the hardware pipeline.
6623 
// FP two-operand op, single precision: srcs read S1/S2, result in S5,
// dual-issues to either slot.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP two-operand op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> float conversion.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> double conversion.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> int conversion (result in a general register).
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP float -> long conversion.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> float conversion (source from a general register).
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> float conversion.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> int conversion.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP double -> long conversion.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Int -> double conversion.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Long -> double conversion.
// NOTE(review): src is typed iRegIorL2I here, unlike fp_l2f which uses
// iRegL — confirm whether iRegL was intended.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP divide, single precision: restricted to issue slot 0.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision: restricted to issue slot 0.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads flags and both sources
// at S1, shorter latency (S3) than the arithmetic classes.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6825 
// Vector pipe classes. The 64-bit (vecD) forms dual-issue to either slot
// (INS01); most 128-bit (vecX) forms can only issue in slot 0 (INS0).

// 64-bit vector multiply.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply: slot 0 only.
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector multiply-accumulate: dst is also read (the accumulator).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply-accumulate: slot 0 only.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector integer two-operand op.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// 128-bit vector integer two-operand op: slot 0 only.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// 64-bit vector logical op (shorter latency than arithmetic).
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector logical op: slot 0 only.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by register shift-count.
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by register shift-count: slot 0 only.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by immediate.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by immediate: slot 0 only.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector FP two-operand op.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP two-operand op: slot 0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP multiply/divide: slot 0 only.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP multiply/divide: slot 0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP square root: slot 0 only.
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP unary op.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP unary op: slot 0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Duplicate a general register into all lanes, 64-bit.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes, 128-bit.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 64-bit.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into all lanes, 128-bit.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into all lanes, 128-bit.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit: slot 0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector load, 64-bit: address consumed at issue.
pipe_class vload_reg_mem64(vecD dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit: address at issue, data read at S2.
pipe_class vstore_reg_mem64(vecD src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7100 
// Vector store, 128-bit: address at issue, data read at S2.
// Fix: src is a 128-bit vector, so it is typed vecX (was vecD, the 64-bit
// type used by vstore_reg_mem64), mirroring vload_reg_mem128 above.
pipe_class vstore_reg_mem128(vecX src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7109 
7110 //------- Integer ALU operations --------------------------
7111 
7112 // Integer ALU reg-reg operation
7113 // Operands needed in EX1, result generated in EX2
7114 // Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read); // shifted operand needed at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read); // shifted operand needed at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written at EX2 but the ALU is booked at EX1 —
// confirm the resource stage is intentional.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7207 
7208 //------- Compare operation -------------------------------
7209 
7210 // Compare reg-reg
7211 // Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7234 
7235 //------- Conditional instructions ------------------------
7236 
7237 // Conditional no operands
7238 // Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7272 
7273 //------- Multiply pipeline operations --------------------
7274 
7275 // Multiply reg-reg
7276 // Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply reg-reg
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64 bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7325 
7326 //------- Divide pipeline operations --------------------
7327 
7328 // Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7351 
7352 //------- Load pipeline operations ------------------------
7353 
7354 // Load - prefetch
7355 // Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read); // address consumed at issue
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read); // address register consumed at issue
  INS01  : ISS;
  LDST   : WR;
%}
7385 
7386 //------- Store pipeline operations -----------------------
7387 
7388 // Store - zr, mem
7389 // Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read); // address consumed at issue
  src    : EX2(read); // data not needed until EX2
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// NOTE(review): "dst" here is the address register (read-only), not a
// destination — the naming mirrors the instruct operand order.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7419 
//------- Branch pipeline operations ----------------------
7421 
7422 // Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read); // flags needed by EX1
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read); // tested register needed by EX1
  INS01  : ISS;
  BRANCH : EX1;
%}
7448 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
// Used for nodes that expand to a multi-instruction sequence.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7472 
// Empty pipeline class
// Zero-latency placeholder (used for MachNop below).
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
// Deliberately very high latency so the scheduler treats calls as barriers.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
7507 
// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7512 
7513 %}
7514 //----------INSTRUCTIONS-------------------------------------------------------
7515 //
7516 // match      -- States which machine-independent subtree may be replaced
7517 //               by this instruction.
7518 // ins_cost   -- The estimated cost of this instruction is used by instruction
7519 //               selection to identify a minimum cost tree of machine
7520 //               instructions that matches a tree of machine-independent
7521 //               instructions.
7522 // format     -- A string providing the disassembly for this instruction.
7523 //               The value of an instruction's operand may be inserted
7524 //               by referring to it with a '$' prefix.
7525 // opcode     -- Three instruction opcodes may be provided.  These are referred
7526 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7528 //               indicate the type of machine instruction, while secondary
7529 //               and tertiary are often used for prefix options or addressing
7530 //               modes.
7531 // ins_encode -- A list of encode classes with parameters. The encode class
7532 //               name must have been defined in an 'enc_class' specification
7533 //               in the encode section of the architecture description.
7534 
7535 // ============================================================================
7536 // Memory (Load/Store) Instructions
7537 
7538 // Load Instructions
7539 
// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  // Plain load only: the acquiring (volatile) case is matched by
  // loadB_volatile in the volatile section below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // n is the ConvI2L node; n->in(1) is the LoadB being folded in.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7595 
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  // Non-acquiring form; see loadS_volatile for the acquiring case.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  // n->in(1) is the folded LoadS under the ConvI2L.
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7651 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrsw sign-extends the 32-bit value into the 64-bit register,
  // implementing the ConvI2L for free.
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  // (LoadI & 0xFFFFFFFF) folded: a plain 32-bit load already
  // zero-extends, so the AndL disappears.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // NOTE(review): the "# int" annotation looks copy-pasted from loadI;
  // format text left unchanged here.
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7707 
// Load Range
// Array-length loads are never volatile, so no acquiring predicate is needed.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7776 
// Load Float
// FP loads use pipe_class_memory rather than the integer load pipe classes.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7804 
7805 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant

// Costed higher than the int/long forms: materializing a full pointer
// may take a multi-instruction mov sequence.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7861 
// Load Pointer Constant One
// (the pointer constant 1, used as a sentinel value — not NULL)

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Annotation fixed: this materializes the constant one, not NULL
  // (the old "# NULL ptr" text was copy-pasted from loadConP0).
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7875 
// Load Poll Page Constant

// Materializes the safepoint polling page address with a pc-relative adr.
instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

// Materializes the card-table byte map base address.
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
7903 
// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
7945 
// Load Packed Float Constant

// Float constants encodable as an fmov immediate avoid the constant table.
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant

// Fallback for float constants that cannot be fmov-encoded: load from
// the constant table.
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
7989 
// Load Double Constant

// Fallback for double constants that cannot be fmov-encoded: load from
// the constant table.
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Annotation fixed: this loads a double ("float=" was copy-pasted
  // from loadConF).
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8006 
8007 // Store Instructions
8008 
// Store CMS card-mark Immediate
// Used only when the preceding StoreStore barrier is provably redundant.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  // Plain store only; the releasing (volatile) case is matched by
  // storeB_volatile below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8051 
8052 
// Store Byte zero immediate
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Format fixed: the encoder (aarch64_enc_strb0, also used by
  // storeimmCM0 above) stores the zero register; the old text said
  // "strb rscractch2" which was both misspelt and misleading.
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8065 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Char/Short zero immediate — stores zr instead of burning a register.
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Integer zero immediate.
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8120 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // NOTE(review): "# int" annotation looks copy-pasted from storeI;
  // format text left unchanged here.
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
8176 
// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store compressed null: when both narrow-oop and narrow-klass bases are
// NULL, rheapbase holds zero and can be stored directly.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8236 
// Store Compressed Klass Pointer
// NOTE(review): predicate precedes match here, unlike the sibling store
// rules; harmless, but inconsistent ordering.
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8267 
8268 //  ---------------- volatile loads and stores ----------------
8269 
// Load Byte (8 bit signed)
// Acquiring counterpart of loadB: no predicate, so it matches whenever
// the non-acquiring rule's !needs_acquiring_load(n) predicate fails.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
8334 
// Load Char (16 bit unsigned), acquiring form.
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8359 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Format fixed: the encoder is aarch64_enc_ldarsh, the sign-extending
  // load-acquire; the old text said "ldarh" (the zero-extending form).
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8372 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends, so the AndL with 0xFFFFFFFF is folded away.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8463 
// Store Byte
// Releasing counterpart of storeB: matched when needs_releasing_store(n)
// makes the plain rule's predicate fail.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
8542 
// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8571 
8572 //  ---------------- end of volatile loads and stores ----------------
8573 
8574 // ============================================================================
8575 // BSWAP Instructions
8576 
// Reverse the bytes of a 32-bit value.
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a 64-bit value.
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of an unsigned 16-bit value.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the bytes of a signed 16-bit value; the sbfmw sign-extends
// the swapped halfword back to 32 bits.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8630 
8631 // ============================================================================
8632 // Zero Count Instructions
8633 
// Count leading zeros, 32-bit.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count leading zeros, 64-bit.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros, 32-bit: no direct instruction, so bit-reverse
// then count leading zeros.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Count trailing zeros, 64-bit: rbit + clz, as above.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8685 
8686 //---------- Population Count Instructions -------------------------------------
8687 //
8688 
// Integer.bitCount: move the value into a SIMD register, use the vector
// CNT (per-byte popcount) instruction, then sum the byte counts with ADDV.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes $src in place (movw zero-extends, clearing
    // the upper 32 bits for the L2I case) even though src is an input
    // operand — verify the register allocator tolerates this src write.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Integer.bitCount of a value loaded from memory: load the 32-bit value
// directly into the SIMD register (ldrs) to skip the GP->FP move.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Long.bitCount of a value loaded from memory: load 64 bits straight
// into the SIMD register (ldrd), then cnt + addv as above.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8775 
8776 // ============================================================================
8777 // MemBar Instruction
8778 
// Unsafe.loadFence(): orders prior loads before subsequent loads/stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elide the acquire barrier when unnecessary_acquire(n) proves the
// preceding volatile load is implemented with a load-acquire; only a
// block comment is emitted for disassembly readability.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier: LoadLoad|LoadStore.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Lock-entry acquire barrier: always elided because the CAS used for
// monitor acquisition already has the required acquire semantics.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Unsafe.storeFence(): orders prior loads/stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elide the release barrier when unnecessary_release(n) proves the
// following volatile store is implemented with a store-release.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier: LoadStore|StoreStore.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Store-store barrier only (e.g. after object initialization).
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock-exit release barrier: always elided because the monitor-release
// store sequence already provides release semantics.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Elide the volatile (StoreLoad) barrier when unnecessary_volatile(n)
// proves adjacent acquire/release instructions make it redundant.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full volatile barrier: StoreLoad is the most expensive ordering, hence
// the inflated cost to steer the matcher towards the elided form.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
8923 
8924 // ============================================================================
8925 // Cast/Convert Instructions
8926 
// Reinterpret a long as a pointer; the mov is skipped when the register
// allocator has already placed src and dst in the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long; mov elided when registers coincide.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8969 
8970 // Convert compressed oop into int for vectors alignment masking
8971 // in case of 32bit oops (heap < 4Gb).
// Truncate a compressed oop to an int for vector alignment masking.
// Only valid when the narrow-oop shift is zero (heap < 4Gb), in which
// case the compressed bits equal the low 32 address bits.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format string: was "mov dst, $src", which printed the literal
  // text "dst" (missing '$') and named the wrong mnemonic — the encoding
  // emits movw, not mov.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8985 
8986 
8987 // Convert oop pointer into compressed form
// Compress a (possibly null) oop; the null check inside encode_heap_oop
// uses the flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop known to be non-null: no null check needed.
// NOTE(review): cr is declared but carries no effect() clause here,
// unlike the nullable variant above — verify this is intentional.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a possibly-null narrow oop.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9040 
9041 // n.b. AArch64 implementations of encode_klass_not_null and
9042 // decode_klass_not_null do not modify the flags register so, unlike
9043 // Intel, we don't kill CR as a side effect here
9044 
// Compress a klass pointer (never null); flags are not clobbered on
// AArch64, so no KILL cr (see comment above).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer; the macro assembler provides a
// distinct in-place form for when dst and src share a register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9078 
// Type-refinement node: purely a compile-time assertion, emits no code.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Pointer cast node: compile-time only, emits no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Int range-narrowing cast node: compile-time only, emits no code.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9109 
9110 // ============================================================================
9111 // Atomic operation instructions
9112 //
9113 // Intel and SPARC both implement Ideal Node LoadPLocked and
9114 // Store{PIL}Conditional instructions using a normal load for the
9115 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9116 //
9117 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9118 // pair to lock object allocations from Eden space when not using
9119 // TLABs.
9120 //
9121 // There does not appear to be a Load{IL}Locked Ideal Node and the
9122 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9123 // and to use StoreIConditional only for 32-bit and StoreLConditional
9124 // only for 64-bit.
9125 //
9126 // We implement LoadPLocked and StorePLocked instructions using,
9127 // respectively the AArch64 hw load-exclusive and store-conditional
9128 // instructions. Whereas we must implement each of
9129 // Store{IL}Conditional using a CAS which employs a pair of
9130 // instructions comprising a load-exclusive followed by a
9131 // store-conditional.
9132 
9133 
9134 // Locked-load (linked load) of the current heap-top
9135 // used when updating the eden heap top
9136 // implemented using ldaxr on AArch64
9137 
// Locked-load (linked load) of the current heap-top, paired with
// storePConditional when bumping eden's top; uses ldaxr (load-acquire
// exclusive) so a later stlxr can detect intervening writes.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}

// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}


// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9215 
9216 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9217 // can't match them
9218 
9219 // standard CompareAndSwapX when we are using barriers
9220 // these have higher priority than the rules selected by a predicate
9221 
// CompareAndSwapI with full barriers: cmpxchgw then cset materializes the
// success flag (EQ) as a 0/1 int result; flags are clobbered.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapL with full barriers (64-bit cmpxchg).
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapP with full barriers (pointer-width cmpxchg).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CompareAndSwapN with full barriers (32-bit cmpxchgw on narrow oops).
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// alternative CompareAndSwapX when we are eliding barriers

// Acquiring form of CompareAndSwapI: used when needs_acquiring_load_exclusive
// shows the CAS can subsume the surrounding barriers; lower cost makes the
// matcher prefer it over the barrier form above.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring form of CompareAndSwapL.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring form of CompareAndSwapP.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring form of CompareAndSwapN.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9371 
9372 
// Atomic exchange (GetAndSetI): returns the previous 32-bit value.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange (GetAndSetL): returns the previous 64-bit value.
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow oop (32-bit slot).
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a full-width pointer.
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9408 
9409 
// Atomic fetch-and-add, 64-bit, register increment; returns prior value.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Result-discarding variant (noreg): slightly cheaper, selected when the
// ideal graph shows the old value is unused.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 64-bit fetch-and-add with an immediate increment (add/sub range).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment, result-discarding variant.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic fetch-and-add, 32-bit, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit, result-discarding variant.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add with an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit immediate-increment, result-discarding variant.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9493 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// csetw yields 0/1 for NE, then cnegw negates when LT, giving -1/0/1.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// CmpL3 against an add/sub-range immediate. A negative immediate is
// handled by adding its negation (subs cannot encode a negative imm);
// immLAddSub bounds the constant so -con cannot overflow.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9541 
9542 // ============================================================================
9543 // Conditional Move Instructions
9544 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9554 
9555 instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9556   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9557 
9558   ins_cost(INSN_COST * 2);
9559   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}
9560 
9561   ins_encode %{
9562     __ cselw(as_Register($dst$$reg),
9563              as_Register($src2$$reg),
9564              as_Register($src1$$reg),
9565              (Assembler::Condition)$cmp$$cmpcode);
9566   %}
9567 
9568   ins_pipe(icond_reg_reg);
9569 %}
9570 
9571 instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9572   match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
9573 
9574   ins_cost(INSN_COST * 2);
9575   format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}
9576 
9577   ins_encode %{
9578     __ cselw(as_Register($dst$$reg),
9579              as_Register($src2$$reg),
9580              as_Register($src1$$reg),
9581              (Assembler::Condition)$cmp$$cmpcode);
9582   %}
9583 
9584   ins_pipe(icond_reg_reg);
9585 %}
9586 
9587 // special cases where one arg is zero
9588 
9589 // n.b. this is selected in preference to the rule above because it
9590 // avoids loading constant 0 into a source register
9591 
9592 // TODO
9593 // we ought only to be able to cull one of these variants as the ideal
9594 // transforms ought always to order the zero consistently (to left/right?)
9595 
// CMoveI with constant 0 as the not-taken value: uses zr instead of
// materialising 0 in a register. Selects $src when $cmp holds, else zero.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9611 
// Unsigned-compare variant of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9627 
// CMoveI with constant 0 as the taken value: selects zero (zr) when $cmp
// holds, else $src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9643 
// Unsigned-compare variant of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9659 
9660 // special case for creating a boolean 0 or 1
9661 
9662 // n.b. this is selected in preference to the rule above because it
9663 // avoids loading constants 0 and 1 into a source register
9664 
// Materialise a 0/1 boolean without loading either constant:
// csincw $dst, zr, zr gives 0 when $cmp holds and zr+1 == 1 otherwise.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9683 
// Unsigned-compare variant of cmovI_reg_zero_one (0/1 boolean via csincw).
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9702 
// CMoveL, signed compare: 64-bit csel, $src2 selected when $cmp holds.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9718 
// CMoveL, unsigned compare variant.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9734 
9735 // special cases where one arg is zero
9736 
// CMoveL, taken value is constant 0: selects zr when $cmp holds, else $src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9752 
// Unsigned-compare variant of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9768 
// CMoveL, not-taken value is constant 0: selects $src when $cmp holds, else zr.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9784 
// Unsigned-compare variant of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9800 
// CMoveP (pointer), signed compare: 64-bit csel, $src2 selected when $cmp holds.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9816 
// CMoveP, unsigned compare variant.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9832 
9833 // special cases where one arg is zero
9834 
// CMoveP, taken value is null (0): selects zr when $cmp holds, else $src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9850 
// Unsigned-compare variant of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9866 
// CMoveP, not-taken value is null (0): selects $src when $cmp holds, else zr.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9882 
// Unsigned-compare variant of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9898 
// CMoveN (compressed pointer), signed compare: 32-bit cselw since narrow
// oops occupy the low word.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9914 
// CMoveN (compressed pointer), unsigned compare variant.
// Fix: the format string previously said "# signed" although this rule
// matches cmpOpU/rFlagsRegU, so disassembly output mislabelled the compare.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9930 
9931 // special cases where one arg is zero
9932 
// CMoveN, taken value is compressed null (0): selects zr when $cmp holds.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9948 
// Unsigned-compare variant of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9964 
// CMoveN, not-taken value is compressed null (0): selects $src when $cmp holds.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9980 
// Unsigned-compare variant of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9996 
// CMoveF, signed compare: fcsels picks $src2 when $cmp holds, else $src1
// (same operand-order convention as the integer csel rules above).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10014 
// CMoveF, unsigned compare variant.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10032 
// CMoveD, signed compare: fcseld picks $src2 when $cmp holds, else $src1.
// Fix: the format string previously said "cmove float" although this rule
// matches CMoveD and emits fcseld (double-precision select).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10050 
// CMoveD, unsigned compare variant.
// Fix: the format string previously said "cmove float" although this rule
// matches CMoveD and emits fcseld (double-precision select).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10068 
10069 // ============================================================================
10070 // Arithmetic Instructions
10071 //
10072 
10073 // Integer Addition
10074 
10075 // TODO
10076 // these currently employ operations which do not set CR and hence are
10077 // not flagged as killing CR but we would like to isolate the cases
10078 // where we want to set flags from those where we don't. need to work
10079 // out how to do that.
10080 
// Integer add, register-register: addw (32-bit add, flags untouched).
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10095 
// Integer add, register-immediate; shares the add/sub immediate encoder,
// with opcode 0x0 flagging "add" (subI_reg_imm uses 0x1 for "sub").
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10109 
// Integer add of the low word of a long plus an immediate; ConvL2I is
// absorbed because addw only reads the low 32 bits of $src1.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10123 
10124 // Pointer Addition
// Pointer add: 64-bit add of a long offset to a base pointer.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10139 
// Pointer add with an int offset: folds ConvI2L into the add's sxtw
// extend, saving the separate sign-extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10154 
// Pointer add with a scaled (left-shifted) long index: folded into a
// single lea with an lsl addressing mode.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10169 
// Pointer add with a sign-extended, scaled int index: ConvI2L + LShiftL
// fold into a single lea with an sxtw(scale) addressing mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10184 
// Sign-extend-then-shift (ConvI2L << scale) collapsed into one sbfiz:
// insert at bit ($scale & 63) a field of up to 32 significant source bits.
// NOTE(review): width operand is MIN(32, (-scale) & 63) — 32 caps the field
// at the int's significant bits; confirm against sbfiz's lsb+width <= 64 rule.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10199 
10200 // Pointer Immediate Addition
10201 // n.b. this needs to be more expensive than using an indirect memory
10202 // operand
// Pointer add of an immediate offset (64-bit add/sub immediate encoder,
// opcode 0x0 = add).
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10216 
10217 // Long Addition
// Long add, register-register: 64-bit add, flags untouched.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10233 
// Long Immediate Addition. No constant pool entries required.
// Long add, register-immediate (64-bit add/sub immediate encoder,
// opcode 0x0 = add).
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10248 
10249 // Integer Subtraction
// Integer subtract, register-register: subw (32-bit, flags untouched).
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10264 
10265 // Immediate Subtraction
// Integer subtract, register-immediate (shared encoder; opcode 0x1 = sub).
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10279 
10280 // Long Subtraction
// Long subtract, register-register: 64-bit sub, flags untouched.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10296 
// Long Immediate Subtraction. No constant pool entries required.
// Long subtract, register-immediate (shared encoder; opcode 0x1 = sub).
// Fix: format string was "sub$dst, ..." — missing the mnemonic/operand
// separator, producing malformed disassembly output.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10311 
10312 // Integer Negation (special case for sub)
10313 
// Integer negate: SubI(0, src) collapses to negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10327 
10328 // Long Negation
10329 
// Long negate: SubL(0, src) collapses to neg.
// NOTE(review): src operand class is iRegIorL2I although this is a 64-bit
// negate — the sibling negI_reg uses the same class; confirm this is intended
// rather than iRegL.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10343 
10344 // Integer Multiply
10345 
// Integer multiply: mulw (32-bit).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10360 
// Widening signed multiply: MulL of two sign-extended ints collapses to a
// single smull (32x32 -> 64).
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10375 
10376 // Long Multiply
10377 
// Long multiply: 64-bit mul (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10392 
// High half of a signed 64x64 multiply: smulh.
// Fix: removed a stray trailing comma from the format string
// ("$src2, \t# mulhi") which garbled the disassembly comment.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10408 
10409 // Combined Integer Multiply & Add/Sub
10410 
// Fused int multiply-add: AddI(src3, MulI(src1, src2)) -> maddw.
// Fix: format said "madd" but the 32-bit maddw is what is emitted.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10426 
// Fused int multiply-subtract: SubI(src3, MulI(src1, src2)) -> msubw.
// Fix: format said "msub" but the 32-bit msubw is what is emitted.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10442 
10443 // Combined Long Multiply & Add/Sub
10444 
// Fused long multiply-add: AddL(src3, MulL(src1, src2)) -> madd.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10460 
// Fused long multiply-subtract: SubL(src3, MulL(src1, src2)) -> msub.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10476 
10477 // Integer Divide
10478 
// Integer divide: sdivw via the shared divw encoder.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10488 
// Sign-bit extract: (src >> 31) >>> 31 reduces to a single lsrw #31
// (both shift counts are pinned to 31 by the immI_31 operands).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10498 
// Rounding adjustment for signed divide-by-power-of-two:
// src + ((src >> 31) >>> 31) folds into one addw with an LSR #31 operand.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10512 
10513 // Long Divide
10514 
// Long divide: sdiv via the shared div encoder.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10524 
// 64-bit sign-bit extract: (src >> 63) >>> 63 reduces to one lsr #63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10534 
// 64-bit rounding adjustment for signed divide-by-power-of-two:
// src + ((src >> 63) >>> 63) folds into one add with an LSR #63 operand.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10548 
10549 // Integer Remainder
10550 
// Integer remainder: sdivw into rscratch1 then msubw (dst = src1 - q*src2),
// emitted by the shared modw encoder.
// Fix: format string had an unbalanced paren, "msubw($dst, ... $src1" —
// cleaned up to plain operand syntax.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10561 
10562 // Long Remainder
10563 
// Long remainder: sdiv into rscratch1 then msub (dst = src1 - q*src2),
// emitted by the shared mod encoder.
// Fix: format string had an unbalanced paren ("msub($dst, ... $src1") and
// was missing the "\t" after "\n" that the sibling modI rule uses.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10574 
10575 // Integer Shifts
10576 
10577 // Shift Left Register
// Int shift left by a register amount: lslvw (count taken mod 32 by hardware).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10592 
10593 // Shift Left Immediate
// Int shift left by an immediate: count masked to 0..31 to match Java
// shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10608 
10609 // Shift Right Logical Register
// Int unsigned shift right by a register amount: lsrvw.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10624 
// Shift Right Logical Immediate
// dst = src1 >>> (src2 & 0x1f) (32-bit, unsigned); constant masked to 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10640 
// Shift Right Arithmetic Register
// dst = src1 >> src2 (32-bit, sign-propagating); variable shift via ASRVW.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10656 
// Shift Right Arithmetic Immediate
// dst = src1 >> (src2 & 0x1f) (32-bit, sign-propagating).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10672 
10673 // Combined Int Mask and Right Shift (using UBFM)
10674 // TODO
10675 
10676 // Long Shifts
10677 
// Shift Left Register
// dst = src1 << src2 (64-bit); variable shift via LSLV.  Note the shift
// amount operand is an int register (iRegIorL2I), as for all long shifts.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10693 
// Shift Left Immediate
// dst = src1 << (src2 & 0x3f) (64-bit); constant masked to the low 6 bits,
// matching Java's long shift semantics.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10709 
// Shift Right Logical Register
// dst = src1 >>> src2 (64-bit, unsigned); variable shift via LSRV.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10725 
// Shift Right Logical Immediate
// dst = src1 >>> (src2 & 0x3f) (64-bit, unsigned).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10741 
// A special-case pattern for card table stores.
// Matches an unsigned right shift of a pointer reinterpreted as a long
// (URShiftL (CastP2X src1) src2) directly, so the CastP2X needs no separate
// instruction; the pointer register is simply shifted as a 64-bit value.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10757 
// Shift Right Arithmetic Register
// dst = src1 >> src2 (64-bit, sign-propagating); variable shift via ASRV.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10773 
// Shift Right Arithmetic Immediate
// dst = src1 >> (src2 & 0x3f) (64-bit, sign-propagating).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10789 
10790 // BEGIN This section of the file is automatically generated. Do not edit --------------
10791 
// dst = ~src1: XorL with -1 (immL_M1) folded into a single EON with zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// dst = ~src1, 32-bit variant (EONW with zr).
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10824 
// dst = src1 & ~src2 (the XorI with -1 is bitwise NOT) -> single BICW.
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 & ~src2 -> single BIC.
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10858 
// dst = src1 | ~src2 -> single ORNW.
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = src1 | ~src2 -> single ORN.
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10892 
// dst = ~(src1 ^ src2): -1 ^ (src2 ^ src1) -> single EONW.
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit: dst = ~(src1 ^ src2) -> single EON.
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10926 
// dst = src1 & ~(src2 >>> src3) -> BICW with LSR shifted operand.
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & ~(src2 >>> src3) -> BIC, LSR.
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3) -> BICW, ASR.
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & ~(src2 >> src3) -> BIC, ASR.
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3) -> BICW, LSL.
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & ~(src2 << src3) -> BIC, LSL.
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11034 
// dst = ~(src1 ^ (src2 >>> src3)) -> EONW with LSR shifted operand.
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 >>> src3)) -> EON, LSR.
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3)) -> EONW, ASR.
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 >> src3)) -> EON, ASR.
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3)) -> EONW, LSL.
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = ~(src1 ^ (src2 << src3)) -> EON, LSL.
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11142 
// dst = src1 | ~(src2 >>> src3) -> ORNW with LSR shifted operand.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 >>> src3) -> ORN, LSR.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3) -> ORNW, ASR.
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 >> src3) -> ORN, ASR.
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3) -> ORNW, LSL.
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | ~(src2 << src3) -> ORN, LSL.
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11250 
// dst = src1 & (src2 >>> src3) -> ANDW with LSR shifted operand.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 >>> src3) -> AND, LSR.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3) -> ANDW, ASR.
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 >> src3) -> AND, ASR.
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3) -> ANDW, LSL.
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 & (src2 << src3) -> AND, LSL.
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11364 
// dst = src1 ^ (src2 >>> src3) -> EORW with LSR shifted operand.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 >>> src3) -> EOR, LSR.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3) -> EORW, ASR.
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 >> src3) -> EOR, ASR.
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3) -> EORW, LSL.
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 ^ (src2 << src3) -> EOR, LSL.
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11478 
// dst = src1 | (src2 >>> src3) -> ORRW with LSR shifted operand.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 >>> src3) -> ORR, LSR.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3) -> ORRW, ASR.
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 >> src3) -> ORR, ASR.
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 << src3) -> ORRW, LSL.
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit: dst = src1 | (src2 << src3) -> ORR, LSL.
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11592 
// Add with a constant-shifted second operand, using the AArch64
// shifted-register form of ADD/ADDW.  One rule per (width, shift kind);
// the shift amount is masked to the register width.

// Add (32-bit) with logical-right-shifted second operand.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (64-bit) with logical-right-shifted second operand.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (32-bit) with arithmetic-right-shifted second operand.
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (64-bit) with arithmetic-right-shifted second operand.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (32-bit) with left-shifted second operand.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Add (64-bit) with left-shifted second operand.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11706 
// Subtract with a constant-shifted second operand, using the AArch64
// shifted-register form of SUB/SUBW.  One rule per (width, shift kind);
// the shift amount is masked to the register width.

// Subtract (32-bit) with logical-right-shifted second operand.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract (64-bit) with logical-right-shifted second operand.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract (32-bit) with arithmetic-right-shifted second operand.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract (64-bit) with arithmetic-right-shifted second operand.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract (32-bit) with left-shifted second operand.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Subtract (64-bit) with left-shifted second operand.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11820 
11821 
11822 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// A left shift by L followed by an arithmetic right shift by R is a signed
// bitfield move: immr = (R - L) mod 64, imms = 63 - L selects the field.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: immr/imms are computed modulo 32.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: logical right shift maps to ubfm.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, where mask is 2^k - 1, is an unsigned bitfield
// extract of width k starting at bit rshift.
// NOTE(review): the format string omits $rshift although the emitted
// instruction uses it -- consider including it for PrintAssembly clarity.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // mask is 2^width - 1 (immI_bitmask)
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit variant of ubfxwI.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);  // mask is 2^width - 1 (immL_bitmask)
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx leaves the upper bits zero, so it subsumes the ConvI2L.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
11964 
// Rotations

// (src1 << lshift) | (src2 >>> rshift) with lshift + rshift == 64 is a
// double-register extract: EXTR takes the low rshift bits of src1 as the
// high part and the high bits of src2 as the low part.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must sum to 64 (mod 64) for EXTR to be equivalent.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11981 
// 32-bit variant of extrOrL: (src1 << lshift) | (src2 >>> rshift) with
// lshift + rshift == 32 (mod 32) maps to a single EXTRW.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Shift counts must sum to 32 (mod 32) for EXTRW to be equivalent.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Fixed: the format previously said "extr" although the 32-bit extrw
  // is what gets emitted; keep the debug listing consistent.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
11996 
// Same as extrOrL but matching Add: when the shift counts sum to 64 the
// shifted fields cannot overlap, so Add and Or produce identical results
// and both map to EXTR.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // Shift counts must sum to 64 (mod 64).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12011 
// 32-bit variant of extrAddL: non-overlapping shifted fields make the Add
// equivalent to an Or, so a single EXTRW suffices.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // Shift counts must sum to 32 (mod 32).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  // Fixed: the format previously said "extr" although the 32-bit extrw
  // is what gets emitted; keep the debug listing consistent.
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12026 
12027 
// rol expander

// Rotate-left by a variable amount: rol(x, n) == ror(x, -n), so negate
// the shift count into rscratch1 and use the hardware RORV.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; RORV only uses the low bits, so this is rol.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander

// 32-bit variant of rolL_rReg, using RORVW.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; RORVW only uses the low bits, so this is rol.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12059 
// Rotate-left idiom: (x << s) | (x >>> (64 - s)).
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with (0 - s) instead of (64 - s): equivalent because shift
// counts are taken modulo 64.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12077 
// 32-bit rotate-left idiom: (x << s) | (x >>> (32 - s)).
// Fixed: this rule previously declared long registers (iRegLNoSp/iRegL)
// for an integer OrI pattern and expanded the 64-bit rolL_rReg, so the
// matcher's type check meant it could never be selected and the 32-bit
// rotate was never emitted (cf. JDK-8154537).  Use integer registers and
// the 32-bit rol expander.
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12086 
// 32-bit rotate-left idiom with (0 - s): equivalent modulo 32.
// Fixed: as with rolI_rReg_Var_C_32, the long-register operands made this
// integer rule unmatchable and it wrongly expanded the 64-bit rolL_rReg
// (cf. JDK-8154537).  Use integer registers and the 32-bit rol expander.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12095 
// ror expander

// Rotate-right by a variable amount maps directly onto the hardware RORV.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit variant of rorL_rReg, using RORVW.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12125 
// Rotate-right idiom: (x >>> s) | (x << (64 - s)).
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with (0 - s) instead of (64 - s): equivalent because shift
// counts are taken modulo 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12143 
// 32-bit rotate-right idiom: (x >>> s) | (x << (32 - s)).
// Fixed: this rule previously declared long registers (iRegLNoSp/iRegL)
// for an integer OrI pattern and expanded the 64-bit rorL_rReg, so it
// could never be matched and the 32-bit rotate was never emitted
// (cf. JDK-8154537).  Use integer registers and the 32-bit ror expander.
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12152 
// 32-bit rotate-right idiom with (0 - s): equivalent modulo 32.
// Fixed: as with rorI_rReg_Var_C_32, the long-register operands made this
// integer rule unmatchable and it wrongly expanded the 64-bit rorL_rReg
// (cf. JDK-8154537).  Use integer registers and the 32-bit ror expander.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegIorL2I src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12161 
// Add/subtract (extended)

// Long add with a sign-extended int operand: folds the ConvI2L into the
// ADD's sxtw extended-register form.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Long subtract with a sign-extended int operand (SUB, sxtw form).
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12189 
12190 
// The (x << k) >> k idiom sign- (RShift) or zero- (URShift) extends the
// low bits of x; these rules fold that idiom into the ADD's
// extended-register forms (sxtb/sxth/sxtw/uxtb).

// addw with src2 sign-extended from 16 bits ((x << 16) >> 16).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw with src2 sign-extended from 8 bits ((x << 24) >> 24).
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw with src2 zero-extended from 8 bits ((x << 24) >>> 24).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 sign-extended from 16 bits.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 sign-extended from 32 bits.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 sign-extended from 8 bits.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 zero-extended from 8 bits.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12281 
12282 
// Masking with 0xff / 0xffff / 0xffffffff is a zero-extension; these rules
// fold the AndI/AndL into the ADD/SUB extended-register forms
// (uxtb/uxth/uxtw).

// addw with src2 zero-extended from 8 bits (x & 0xff).
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw with src2 zero-extended from 16 bits (x & 0xffff).
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 zero-extended from 8 bits.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 zero-extended from 16 bits.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 zero-extended from 32 bits.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// subw with src2 zero-extended from 8 bits.
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// subw with src2 zero-extended from 16 bits.
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit subtract with src2 zero-extended from 8 bits.
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit subtract with src2 zero-extended from 16 bits.
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit subtract with src2 zero-extended from 32 bits.
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12412 
12413 // END This section of the file is automatically generated. Do not edit --------------
12414 
12415 // ============================================================================
12416 // Floating Point Arithmetic Instructions
12417 
// Scalar FP add/subtract/multiply, one rule per (operation, precision);
// each maps directly onto the corresponding FADD/FSUB/FMUL form.

// Single-precision FP add.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP multiply.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12507 
// We cannot use these fused mul w add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12513 
12514 
12515 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12516 //   match(Set dst (AddF (MulF src1 src2) src3));
12517 
12518 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12519 
12520 //   ins_encode %{
12521 //     __ fmadds(as_FloatRegister($dst$$reg),
12522 //              as_FloatRegister($src1$$reg),
12523 //              as_FloatRegister($src2$$reg),
12524 //              as_FloatRegister($src3$$reg));
12525 //   %}
12526 
12527 //   ins_pipe(pipe_class_default);
12528 // %}
12529 
12530 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12531 //   match(Set dst (AddD (MulD src1 src2) src3));
12532 
12533 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12534 
12535 //   ins_encode %{
12536 //     __ fmaddd(as_FloatRegister($dst$$reg),
12537 //              as_FloatRegister($src1$$reg),
12538 //              as_FloatRegister($src2$$reg),
12539 //              as_FloatRegister($src3$$reg));
12540 //   %}
12541 
12542 //   ins_pipe(pipe_class_default);
12543 // %}
12544 
12545 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12546 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12547 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12548 
12549 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12550 
12551 //   ins_encode %{
12552 //     __ fmsubs(as_FloatRegister($dst$$reg),
12553 //               as_FloatRegister($src1$$reg),
12554 //               as_FloatRegister($src2$$reg),
12555 //              as_FloatRegister($src3$$reg));
12556 //   %}
12557 
12558 //   ins_pipe(pipe_class_default);
12559 // %}
12560 
12561 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12562 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12563 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12564 
12565 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12566 
12567 //   ins_encode %{
12568 //     __ fmsubd(as_FloatRegister($dst$$reg),
12569 //               as_FloatRegister($src1$$reg),
12570 //               as_FloatRegister($src2$$reg),
12571 //               as_FloatRegister($src3$$reg));
12572 //   %}
12573 
12574 //   ins_pipe(pipe_class_default);
12575 // %}
12576 
12577 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12578 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12579 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12580 
12581 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12582 
12583 //   ins_encode %{
12584 //     __ fnmadds(as_FloatRegister($dst$$reg),
12585 //                as_FloatRegister($src1$$reg),
12586 //                as_FloatRegister($src2$$reg),
12587 //                as_FloatRegister($src3$$reg));
12588 //   %}
12589 
12590 //   ins_pipe(pipe_class_default);
12591 // %}
12592 
12593 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12594 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12595 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12596 
12597 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12598 
12599 //   ins_encode %{
12600 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12601 //                as_FloatRegister($src1$$reg),
12602 //                as_FloatRegister($src2$$reg),
12603 //                as_FloatRegister($src3$$reg));
12604 //   %}
12605 
12606 //   ins_pipe(pipe_class_default);
12607 // %}
12608 
12609 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12610 //   match(Set dst (SubF (MulF src1 src2) src3));
12611 
12612 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12613 
12614 //   ins_encode %{
12615 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12616 //                as_FloatRegister($src1$$reg),
12617 //                as_FloatRegister($src2$$reg),
12618 //                as_FloatRegister($src3$$reg));
12619 //   %}
12620 
12621 //   ins_pipe(pipe_class_default);
12622 // %}
12623 
12624 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12625 //   match(Set dst (SubD (MulD src1 src2) src3));
12626 
12627 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12628 
12629 //   ins_encode %{
12630 //   // n.b. insn name should be fnmsubd
12631 //     __ fnmsub(as_FloatRegister($dst$$reg),
12632 //                as_FloatRegister($src1$$reg),
12633 //                as_FloatRegister($src2$$reg),
12634 //                as_FloatRegister($src3$$reg));
12635 //   %}
12636 
12637 //   ins_pipe(pipe_class_default);
12638 // %}
12639 
12640 
// Single-precision divide: dst = src1 / src2 (FDIVS).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: dst = src1 / src2 (FDIVD).
// Costed higher than the single form (32 vs 18 insn costs).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12670 
12671 instruct negF_reg_reg(vRegF dst, vRegF src) %{
12672   match(Set dst (NegF src));
12673 
12674   ins_cost(INSN_COST * 3);
12675   format %{ "fneg   $dst, $src" %}
12676 
12677   ins_encode %{
12678     __ fnegs(as_FloatRegister($dst$$reg),
12679              as_FloatRegister($src$$reg));
12680   %}
12681 
12682   ins_pipe(fp_uop_s);
12683 %}
12684 
// Double-precision negate: dst = -src (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision absolute value: dst = |src| (FABSS).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision absolute value: dst = |src| (FABSD).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12724 
12725 instruct sqrtD_reg(vRegD dst, vRegD src) %{
12726   match(Set dst (SqrtD src));
12727 
12728   ins_cost(INSN_COST * 50);
12729   format %{ "fsqrtd  $dst, $src" %}
12730   ins_encode %{
12731     __ fsqrtd(as_FloatRegister($dst$$reg),
12732              as_FloatRegister($src$$reg));
12733   %}
12734 
12735   ins_pipe(fp_div_s);
12736 %}
12737 
12738 instruct sqrtF_reg(vRegF dst, vRegF src) %{
12739   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
12740 
12741   ins_cost(INSN_COST * 50);
12742   format %{ "fsqrts  $dst, $src" %}
12743   ins_encode %{
12744     __ fsqrts(as_FloatRegister($dst$$reg),
12745              as_FloatRegister($src$$reg));
12746   %}
12747 
12748   ins_pipe(fp_div_d);
12749 %}
12750 
12751 // ============================================================================
12752 // Logical Instructions
12753 
12754 // Integer Logical Instructions
12755 
12756 // And Instructions
12757 
12758 
// 32-bit bitwise AND, register-register: dst = src1 & src2 (ANDW).
// NOTE(review): rFlagsReg cr is declared but no effect() mentions it and
// andw does not set flags -- presumably vestigial; confirm before removing.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12773 
12774 instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
12775   match(Set dst (AndI src1 src2));
12776 
12777   format %{ "andsw  $dst, $src1, $src2\t# int" %}
12778 
12779   ins_cost(INSN_COST);
12780   ins_encode %{
12781     __ andw(as_Register($dst$$reg),
12782             as_Register($src1$$reg),
12783             (unsigned long)($src2$$constant));
12784   %}
12785 
12786   ins_pipe(ialu_reg_imm);
12787 %}
12788 
// Or Instructions

// 32-bit bitwise OR, register-register: dst = src1 | src2 (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR with a logical immediate (ORRW).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 32-bit bitwise XOR, register-register: dst = src1 ^ src2 (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR with a logical immediate (EORW).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12852 
12853 // Long Logical Instructions
12854 // TODO
12855 
12856 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
12857   match(Set dst (AndL src1 src2));
12858 
12859   format %{ "and  $dst, $src1, $src2\t# int" %}
12860 
12861   ins_cost(INSN_COST);
12862   ins_encode %{
12863     __ andr(as_Register($dst$$reg),
12864             as_Register($src1$$reg),
12865             as_Register($src2$$reg));
12866   %}
12867 
12868   ins_pipe(ialu_reg_reg);
12869 %}
12870 
12871 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
12872   match(Set dst (AndL src1 src2));
12873 
12874   format %{ "and  $dst, $src1, $src2\t# int" %}
12875 
12876   ins_cost(INSN_COST);
12877   ins_encode %{
12878     __ andr(as_Register($dst$$reg),
12879             as_Register($src1$$reg),
12880             (unsigned long)($src2$$constant));
12881   %}
12882 
12883   ins_pipe(ialu_reg_imm);
12884 %}
12885 
12886 // Or Instructions
12887 
12888 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12889   match(Set dst (OrL src1 src2));
12890 
12891   format %{ "orr  $dst, $src1, $src2\t# int" %}
12892 
12893   ins_cost(INSN_COST);
12894   ins_encode %{
12895     __ orr(as_Register($dst$$reg),
12896            as_Register($src1$$reg),
12897            as_Register($src2$$reg));
12898   %}
12899 
12900   ins_pipe(ialu_reg_reg);
12901 %}
12902 
12903 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
12904   match(Set dst (OrL src1 src2));
12905 
12906   format %{ "orr  $dst, $src1, $src2\t# int" %}
12907 
12908   ins_cost(INSN_COST);
12909   ins_encode %{
12910     __ orr(as_Register($dst$$reg),
12911            as_Register($src1$$reg),
12912            (unsigned long)($src2$$constant));
12913   %}
12914 
12915   ins_pipe(ialu_reg_imm);
12916 %}
12917 
12918 // Xor Instructions
12919 
12920 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
12921   match(Set dst (XorL src1 src2));
12922 
12923   format %{ "eor  $dst, $src1, $src2\t# int" %}
12924 
12925   ins_cost(INSN_COST);
12926   ins_encode %{
12927     __ eor(as_Register($dst$$reg),
12928            as_Register($src1$$reg),
12929            as_Register($src2$$reg));
12930   %}
12931 
12932   ins_pipe(ialu_reg_reg);
12933 %}
12934 
12935 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
12936   match(Set dst (XorL src1 src2));
12937 
12938   ins_cost(INSN_COST);
12939   format %{ "eor  $dst, $src1, $src2\t# int" %}
12940 
12941   ins_encode %{
12942     __ eor(as_Register($dst$$reg),
12943            as_Register($src1$$reg),
12944            (unsigned long)($src2$$constant));
12945   %}
12946 
12947   ins_pipe(ialu_reg_imm);
12948 %}
12949 
// Sign-extend int to long: sbfm with imms=31 is sxtw.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: (long)src & 0xFFFFFFFF collapses to a single
// ubfm (uxtw) instead of a sign-extend plus a mask.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Truncate long to int: a 32-bit register move keeps only the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
12988 
// Int to boolean: dst = (src != 0) ? 1 : 0, via compare-with-zero + cset.
// Clobbers the condition flags (KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer to boolean: dst = (src != NULL) ? 1 : 0, full 64-bit compare.
// Clobbers the condition flags (KILL cr).
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13024 
// Double -> float narrowing conversion (FCVT).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Float -> double widening conversion (FCVT).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float -> int: fcvtzs (signed, round toward zero), 32-bit form.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float -> long: fcvtzs (signed, round toward zero), 64-bit form.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// Int -> float: scvtf (signed convert), 32-bit source.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Long -> float: scvtf, 64-bit source.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double -> int: fcvtzs (signed, round toward zero), 32-bit destination.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double -> long: fcvtzs (signed, round toward zero), 64-bit destination.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Int -> double: scvtf, 32-bit source.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Long -> double: scvtf, 64-bit source.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13154 
13155 // stack <-> reg and reg <-> reg shuffles with no conversion
13156 
// Reinterpret a float stack slot as an int: plain 32-bit load from
// sp + disp, no conversion of the bit pattern.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret an int stack slot as a float: 32-bit FP load from sp + disp.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Reinterpret a double stack slot as a long: 64-bit integer load.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Reinterpret a long stack slot as a double: 64-bit FP load.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13228 
// Store a float register into an int stack slot (bit pattern unchanged).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store an int register into a float stack slot (bit pattern unchanged).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13264 
13265 instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{
13266 
13267   match(Set dst (MoveD2L src));
13268 
13269   effect(DEF dst, USE src);
13270 
13271   ins_cost(INSN_COST);
13272 
13273   format %{ "strd $dst, $src\t# MoveD2L_reg_stack" %}
13274 
13275   ins_encode %{
13276     __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
13277   %}
13278 
13279   ins_pipe(pipe_class_memory);
13280 
13281 %}
13282 
// Store a long register into a double stack slot (bit pattern unchanged).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13300 
// Bit-pattern move float reg -> int reg via fmov (no conversion).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Bit-pattern move int reg -> float reg via fmov (no conversion).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// Bit-pattern move double reg -> long reg via fmov (no conversion).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Bit-pattern move long reg -> double reg via fmov (no conversion).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13372 
13373 // ============================================================================
13374 // clearing of an array
13375 
// Clear (zero-fill) an array: count in r11, base address in r10.
// Both fixed registers are consumed/clobbered (USE_KILL); the actual code
// is emitted by the aarch64_enc_clear_array_reg_reg encoding class.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
13388 
13389 // ============================================================================
13390 // Overflow Math Instructions
13391 
// Int add overflow check: cmnw performs op1 + op2 setting flags only
// (result discarded); V flag indicates signed overflow.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int add overflow check, immediate operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long add overflow check: 64-bit cmn.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long add overflow check, immediate operand.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Int subtract overflow check: cmpw performs op1 - op2 setting flags only.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Int subtract overflow check, immediate operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long subtract overflow check: 64-bit cmp.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Long subtract overflow check, immediate operand.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13495 
// Int negate overflow check: 0 - op1 via cmpw against zr.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negate overflow check: 0 - op1 via 64-bit cmp against zr.
// NOTE(review): 'zero' is declared immI0 although the matched
// OverflowSubL takes a long zero -- confirm whether this should be immL0
// (compare the int rule above, where immI0 is the natural type).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13521 
13522 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13523 %{
13524   match(Set cr (OverflowMulI op1 op2));
13525 
13526   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13527             "cmp   rscratch1, rscratch1, sxtw\n\t"
13528             "movw  rscratch1, #0x80000000\n\t"
13529             "cselw rscratch1, rscratch1, zr, NE\n\t"
13530             "cmpw  rscratch1, #1" %}
13531   ins_cost(5 * INSN_COST);
13532   ins_encode %{
13533     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13534     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13535     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13536     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13537     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13538   %}
13539 
13540   ins_pipe(pipe_slow);
13541 %}
13542 
13543 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
13544 %{
13545   match(If cmp (OverflowMulI op1 op2));
13546   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13547             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13548   effect(USE labl, KILL cr);
13549 
13550   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13551             "cmp   rscratch1, rscratch1, sxtw\n\t"
13552             "b$cmp   $labl" %}
13553   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
13554   ins_encode %{
13555     Label* L = $labl$$label;
13556     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13557     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13558     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13559     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13560   %}
13561 
13562   ins_pipe(pipe_serial);
13563 %}
13564 
// Overflow check for long multiply, producing a flags value.
// The 128-bit product (smulh:mul) overflows 64 bits exactly when the
// high half is not the pure sign-extension of the low half, i.e. when
// rscratch2 != (rscratch1 ASR 63).
//
// Fix: the shift amount was previously 31, which tests against the
// sign-extension of bit 31 instead of bit 63 and mis-reports overflow
// for perfectly representable products (e.g. 2^40 * 2^10).
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // Translate the NE/EQ result into the V flag that cmpOp overflow
    // conditions test: 0x80000000 - 1 sets VS only in the NE case.
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
13587 
// Overflow check for long multiply feeding an If: branch directly.
//
// Fix: as in overflowMulL_reg, the sign-extension comparison must use
// ASR #63 (bit 63 of the 64-bit low half), not ASR #31, otherwise
// in-range products are falsely flagged as overflowing.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // NE means overflow; map the requested VS/VC condition onto NE/EQ.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13611 
13612 // ============================================================================
13613 // Compare Instructions
13614 
// Signed int compares: set flags from a 32-bit compare.  The immediate
// forms trade cost against encodability: an add/sub-encodable immediate
// is a single instruction, an arbitrary immediate may need a scratch
// move first (hence double cost).
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against constant zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate encodable as an add/sub operand: single instruction.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate: may need a scratch-register move, so costlier.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13670 
13671 // Unsigned compare Instructions; really, same as signed compare
13672 // except it should only be used to feed an If or a CMovI which takes a
13673 // cmpOpU.
13674 
// Unsigned compares: the emitted instruction is identical to the signed
// case (cmpw sets all flags); only the flags-register class differs so
// that consumers are forced to use unsigned condition codes (cmpOpU).
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned compare against constant zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned compare against an arbitrary immediate (may need a move).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13730 
// Signed long compares: 64-bit cmp variants of the int patterns above.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Long compare against constant zero.
// NOTE(review): the zero operand is declared immI0 while CmpL compares
// longs, and the format prints "tst" although the encoding is a
// compare-with-zero — presumably both are historical; confirm against
// the immL0 operand and the aarch64_enc_cmp_imm_addsub encoding class.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Long compare against an arbitrary immediate (may need a move).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13786 
// Pointer and compressed-pointer compares.  Pointers compare unsigned
// (rFlagsRegU) since addresses have no sign.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Null check: pointer compare against the constant NULL.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Null check for a compressed pointer.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13842 
13843 // FP comparisons
13844 //
13845 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13846 // using normal cmpOp. See declaration of rFlagsReg for details.
13847 
// Single-precision float compare: fcmps sets the normal flags register,
// consumed through ordinary cmpOp conditions (see rFlagsReg declaration).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13861 
// Float compare against the constant 0.0 (uses the fcmp #0.0 form).
//
// Fix: replaced the non-standard literal "0.0D" — a GCC numeric-literal
// extension rejected by standard-conforming compilers such as Clang —
// with the plain double literal 0.0.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13875 // FROM HERE
13876 
// Double-precision float compare; flags consumed via normal cmpOp.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13890 
// Double compare against the constant 0.0 (uses the fcmp #0.0 form).
//
// Fix: replaced the non-standard literal "0.0D" (GCC extension,
// rejected by Clang and standard C++) with the plain double literal.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13904 
// Three-way float compare: dst = -1, 0 or 1 for <, ==, > (unordered
// compares as less, matching Java Float.compare semantics for NaN on
// the "less" side of this pattern).
//
// Fixes: removed the unused `Label done` (declared and bound but never
// branched to — dead code) and balanced the parentheses in the format
// string.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13932 
// Three-way double compare: dst = -1, 0 or 1 for <, ==, > (unordered
// treated as less).
//
// Fixes: removed the unused `Label done` (bound but never targeted)
// and balanced the parentheses in the format string.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
13959 
// Three-way float compare against constant 0.0.
//
// Fixes: non-standard "0.0D" literal (GCC extension) replaced with 0.0;
// unused `Label done` removed; format-string parentheses balanced.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);

%}
13986 
// Three-way double compare against constant 0.0.
//
// Fixes: non-standard "0.0D" literal (GCC extension) replaced with 0.0;
// unused `Label done` removed; format-string parentheses balanced.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw $dst, zr, zr, eq\n\t"
            "csnegw $dst, $dst, $dst, lt"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
14012 
// CmpLTMask: dst = (p < q) ? -1 : 0 (an all-ones mask when less-than).
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = 1 if p < q else 0 ...
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // ... then negate: 1 -> -1 (all ones), 0 -> 0.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: the mask is just the sign bit replicated,
// which a single arithmetic shift right by 31 produces directly.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14049 
14050 // ============================================================================
14051 // Max and Min
14052 
// Signed int minimum: cmpw + cselw on LT.
//
// Fix: the format string was malformed (missing comma between the cmpw
// operands, missing comma before the condition, stray trailing tab);
// the emitted code is unchanged.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw  $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, lt"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 < src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14077 // FROM HERE
14078 
// Signed int maximum: cmpw + cselw on GT.
//
// Fix: format string punctuation corrected (commas between operands and
// before the condition, stray trailing tab removed); code unchanged.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw  $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, gt"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // dst = (src1 > src2) ? src1 : src2
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14103 
14104 // ============================================================================
14105 // Branch Instructions
14106 
14107 // Direct Branch.
// Unconditional direct branch (Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14163 
14164 // Make use of CBZ and CBNZ.  These instructions, as well as being
14165 // shorter than (cmp; branch), have the additional benefit of not
14166 // killing the flags.
14167 
// Compare-with-zero branches folded into a single CBZ/CBNZ: EQ against
// zero becomes cbz, NE becomes cbnz.  These leave the flags untouched.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// 64-bit variant of the above.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null-check-and-branch on a pointer.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null-check-and-branch on a compressed pointer (32-bit cbz/cbnz).
instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check of a decoded narrow oop: the decoded value is null exactly
// when the narrow form is zero, so test the narrow register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Unsigned compare against zero: u <= 0 is equivalent to u == 0 and
// u > 0 to u != 0, so gt/le (cmpcodes HI/LS) also fold to cbz/cbnz.
instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            ||  n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// 64-bit register variant of the unsigned zero-compare branch.
// NOTE(review): this matches ideal CmpU with long-register operands —
// presumably intentional for this port's typing; confirm against the
// matcher rules that produce CmpU over iRegL inputs.
instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14304 
14305 // Test bit and Branch
14306 
14307 // Patterns for short (< 32KiB) variants
// Sign tests (x < 0 / x >= 0) reduce to testing the sign bit with
// TBZ/TBNZ: LT maps to "bit set" (NE for tbr), GE to "bit clear" (EQ).
// The short variants require the target within TBZ range (+/-32KiB);
// the far variants below pass far=true to tbr, which presumably emits
// an inverted test around an unconditional branch — confirm in
// MacroAssembler::tbr.
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    // Bit 63 is the sign bit of a long.
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int sign test: bit 31.
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// (x & single-bit-mask) ==/!= 0 folds to a single-bit test-and-branch.
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // The mask is a power of two (enforced by the predicate), so the
    // bit index is exactly its log2.
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// And far variants
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14452 
14453 // Test bits
14454 
// (x & const) compared with 0: fold to a single tst when the constant
// is encodable as a 64-bit logical immediate (checked by the predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14467 
// (x & const) compared with 0 for int: fold to a single tstw when the
// constant is a valid 32-bit logical immediate.
//
// Fix: the format string printed "tst" although the encoding emits the
// 32-bit tstw (the register form cmpI_and_reg already prints "tstw").
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
14480 
// Register-register forms of the bit-test compares above.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14502 
14503 
14504 // Conditional Far Branch
14505 // Conditional Far Branch Unsigned
14506 // TODO: fixme
14507 
14508 // counted loop end branch near
// Counted-loop back branches: same encoding as the plain conditional
// branches, matched on CountedLoopEnd so the loop optimizer can see them.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14541 
14542 // counted loop end branch far
14543 // counted loop end branch far unsigned
14544 // TODO: fixme
14545 
14546 // ============================================================================
14547 // inlined locking and unlocking
14548 
// Inlined monitor enter: sets flags for the caller to test whether the
// fast path succeeded; tmp/tmp2 are clobbered by the lock sequence.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}

// Inlined monitor exit; flags report fast-path success as above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14576 
14577 
14578 // ============================================================================
14579 // Safepoint Instructions
14580 
14581 // TODO
14582 // provide a near and far version of this code
14583 
// Safepoint poll: load from the polling page whose address is in $poll,
// discarding the value (ldrw to zr), tagged with a poll_type relocation.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14596 
14597 
14598 // ============================================================================
14599 // Procedure Call/Return Instructions
14600 
14601 // Call Java Static Instruction
14602 
// Direct static Java call to $meth, followed by the standard call epilog.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14618 
14619 // TO HERE
14620 
14621 // Call Java Dynamic Instruction
// Dynamic (virtual/interface) Java call to $meth via the dynamic-call
// encoding, followed by the standard call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14637 
14638 // Call Runtime Instruction
14639 
// Call from compiled Java code into the runtime at $meth.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14654 
14655 // Call Runtime Instruction
14656 
// Leaf runtime call (no safepoint) to $meth; uses the same java-to-runtime
// encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14671 
14672 // Call Runtime Instruction
14673 
// Leaf runtime call that does not use/kill FP state; shares the
// java-to-runtime encoding with the other runtime calls.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14688 
14689 // Tail Call; Jump from runtime stub to Java code.
14690 // Also known as an 'interprocedural jump'.
14691 // Target of jump will eventually return to caller.
14692 // TailJump below removes the return address.
// Indirect tail call: branch to $jump_target with the method oop held in
// the inline-cache register; only the target feeds the encoding.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14705 
// Indirect tail jump (e.g. to an exception handler): branch to $jump_target
// with the exception oop pinned in r0.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14718 
14719 // Create exception oop: created by stack-crawling runtime code.
14720 // Created exception is now available to this handler, and is setup
14721 // just prior to jumping to this handler. No code emitted.
14722 // TODO check
14723 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-size marker node: the exception oop is already in r0 when control
// reaches the handler, so no instructions are emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14736 
14737 // Rethrow exception: The exception oop will come in the first
14738 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: branch (not call) to the rethrow stub; the exception oop
// arrives in the first argument register.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14749 
14750 
14751 // Return Instruction
14752 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr, so this is a plain ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14763 
14764 // Die now.
// Trap for unreachable code: emits a breakpoint (brk #999) to halt execution.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14779 
14780 // ============================================================================
14781 // Partial Subtype Check
14782 //
14783 // superklass array for an instance of the superklass.  Set a hidden
14784 // internal cache on a hit (cache is checked with exposed code in
14785 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14786 // encoding ALSO sets flags.
14787 
// Partial subtype check with a register result: $result is zeroed on a hit
// (opcode 0x1 requests the zeroing); flags and $temp are clobbered.
// Inputs/outputs are pinned to the registers the stub expects (r4/r0/r2/r5).
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14802 
// Flags-only variant: matches a compare of the partial-subtype-check result
// against zero, so only cr is produced; $temp and $result are clobbered and
// the result register is not zeroed on a hit (opcode 0x0).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // same cost as the register-result version above
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14817 
// Intrinsic String.compareTo (non-compact strings only): converts the byte
// counts to char counts (asrw by 1) then calls the string_compare stub.
// All inputs are use-killed; $tmp1 is scratch.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt1$$Register, $cnt1$$Register, 1);
    __ asrw($cnt2$$Register, $cnt2$$Register, 1);
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14836 
// Intrinsic String.indexOf with a variable-length needle: the -1 passed to
// string_indexof() signals that the needle length is in $cnt2 at runtime.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14855 
// Intrinsic String.indexOf with a small constant-length needle (<= 4 chars,
// per immI_le_4): the constant count is baked into the stub call and zr is
// passed in place of a runtime cnt2 register.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14876 
// Intrinsic String.equals (non-compact): halves the byte count to a char
// count, then delegates to arrays_equals with 2-byte elements and the
// is_string flag set.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     2, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
14894 
// Arrays.equals for byte arrays (LL encoding): arrays_equals with 1-byte
// elements, is_string false; $tmp is scratch.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     1, /*is_string*/false);
    %}
  ins_pipe(pipe_class_memory);
%}
14910 
// Arrays.equals for char arrays (UU encoding): arrays_equals with 2-byte
// elements, is_string false; $tmp is scratch.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     2, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
14926 
14927 
14928 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// Delegates to the encode_iso_array stub; kills all inputs, four SIMD temps
// (v0-v3) and the flags.  $result receives the number of encoded chars.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
14946 
14947 // ============================================================================
14948 // This name is KNOWN by the ADLC and cannot be changed.
14949 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14950 // for this guy.
// ThreadLocal: the current thread already lives in the dedicated thread
// register, so this is a zero-size, zero-cost node with no code emitted.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14965 
14966 // ====================VECTOR INSTRUCTIONS=====================================
14967 
14968 // Load vector (32 bits)
// Vector loads: the LoadVector memory_size predicate selects the width —
// 4 bytes (ldrs), 8 bytes (ldrd) or 16 bytes (ldrq).

// Load vector (32 bits)
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}

// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15000 
15001 // Store Vector (32 bits)
// Vector stores: mirror of the loads above, selected by the StoreVector
// memory_size predicate (strs / strd / strq).

// Store Vector (32 bits)
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}

// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15033 
// Byte replicates: DUP broadcasts a GP register into every lane; the _imm
// variants use movi with the constant masked to 8 bits.  The 64-bit form
// also accepts length 4 (a half-used vecD).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15083 
// Short (16-bit) replicates: DUP across H lanes, or movi with the constant
// masked to 16 bits; the 64-bit form also accepts length 2.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15133 
// Int (32-bit) replicates: DUP across S lanes, or movi with the constant
// (no masking needed at 32 bits).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}

instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}

instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15181 
// Long (64-bit) replicates.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Zero a 128-bit vector by EORing the destination with itself (no constant
// load needed).
// NOTE(review): matches (ReplicateI zero), not ReplicateL, and the format
// says vector(4I) despite the 2L name — confirm against the matcher rules
// before changing.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15207 
// Float/double replicates: DUP from an FP source register (lane 0) into
// all lanes.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}

instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}

instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15246 
15247 // ====================REDUCTION ARITHMETIC====================================
15248 
// Add-reduction of a 2-lane int vector: extract both lanes to GP registers
// (umov) and accumulate into the scalar $src1.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15267 
// Add-reduction of a 4-lane int vector: ADDV sums all lanes into lane 0 of
// $tmp, which is then extracted and added to the scalar $src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15285 
// Multiply-reduction of a 2-lane int vector: extract each lane in turn and
// fold it into the scalar product (dst doubles as the accumulator, hence
// TEMP dst).
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15304 
// Multiply-reduction of a 4-lane int vector: copy the upper 64 bits of
// $src2 into the low half of $tmp (ins D[0] <- D[1]), do a 2-lane vector
// multiply so $tmp holds {l0*l2, l1*l3}, then extract both products and
// fold them into the scalar $src1.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i\n\t"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15329 
// Add-reduction of a 2-lane float vector: lane-by-lane scalar fadds (not a
// vector horizontal add) to preserve strict FP ordering; the ins moves
// lane 1 down to lane 0 of $tmp for the second add.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15349 
// Add-reduction of a 4-lane float vector: lanes 1..3 are each moved to
// lane 0 of $tmp (ins) and accumulated with scalar fadds, preserving
// left-to-right FP ordering.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15381 
// Multiply-reduction of a 2-lane float vector: scalar fmuls per lane, with
// ins bringing lane 1 down to lane 0 of $tmp.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15401 
// Multiply-reduction of a 4-lane float vector: lanes 1..3 are moved to
// lane 0 of $tmp and folded in with scalar fmuls, preserving FP ordering.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15433 
// Add-reduction of a 2-lane double vector: scalar faddd per lane, with ins
// moving D lane 1 down to lane 0 of $tmp.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15453 
// Multiply-reduction of a 2-lane double vector: scalar fmuld per lane, with
// ins moving D lane 1 down to lane 0 of $tmp.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15473 
15474 // ====================VECTOR ARITHMETIC=======================================
15475 
15476 // --------------------------------- ADD --------------------------------------
15477 
// Integer/FP vector add rules. Each rule matches an AddV* ideal node for a
// given element count, and emits addv/fadd with the matching SIMD arrangement.
// D-register (64-bit) forms also accept the shorter vector lengths, since the
// unused upper lanes are dead.

// 8x (or 4x) byte add in a 64-bit register.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 16x byte add in a 128-bit register.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 4x (or 2x) short add in a 64-bit register.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 8x short add in a 128-bit register.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2x int add in a 64-bit register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 4x int add in a 128-bit register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2x long add in a 128-bit register.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2x float add in a 64-bit register.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// 4x float add in a 128-bit register.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15605 
// 2x double add in a 128-bit register.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Consistency fix: every sibling 2D rule (vsub2D, vmul2D, vdiv2D) guards on
  // the vector length; this one was missing the predicate.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15618 
15619 // --------------------------------- SUB --------------------------------------
15620 
// Integer/FP vector subtract rules, mirroring the add rules above: one rule
// per element count, emitting subv/fsub with the matching SIMD arrangement.

// 8x (or 4x) byte subtract in a 64-bit register.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 16x byte subtract in a 128-bit register.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 4x (or 2x) short subtract in a 64-bit register.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 8x short subtract in a 128-bit register.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2x int subtract in a 64-bit register.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 4x int subtract in a 128-bit register.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2x long subtract in a 128-bit register.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2x float subtract in a 64-bit register.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// 4x float subtract in a 128-bit register.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// 2x double subtract in a 128-bit register.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15762 
15763 // --------------------------------- MUL --------------------------------------
15764 
// Vector multiply rules. Note there is no byte (8B/16B) or 2L multiply rule
// here, and multiplies use the dedicated vmul/vmuldiv pipeline classes rather
// than the simple-dop ones used by add/sub.

// 4x (or 2x) short multiply in a 64-bit register.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// 8x short multiply in a 128-bit register.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// 2x int multiply in a 64-bit register.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// 4x int multiply in a 128-bit register.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// 2x float multiply in a 64-bit register.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// 4x float multiply in a 128-bit register.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// 2x double multiply in a 128-bit register.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15863 
15864 // --------------------------------- MLA --------------------------------------
15865 
// Multiply-accumulate rules: match the fused pattern (AddV* dst (MulV* src1
// src2)) and emit a single mlav, which reads and updates dst in place.

// 4x (or 2x) short multiply-add in a 64-bit register.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 8x short multiply-add in a 128-bit register.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// 2x int multiply-add in a 64-bit register.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 4x int multiply-add in a 128-bit register.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
15922 
15923 // --------------------------------- MLS --------------------------------------
15924 
// Multiply-subtract rules: match the fused pattern (SubV* dst (MulV* src1
// src2)) and emit a single mlsv, which reads and updates dst in place.

// 4x (or 2x) short multiply-subtract in a 64-bit register.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 8x short multiply-subtract in a 128-bit register.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// 2x int multiply-subtract in a 64-bit register.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// 4x int multiply-subtract in a 128-bit register.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
15981 
15982 // --------------------------------- DIV --------------------------------------
15983 
// Vector FP divide rules (divide exists only for float/double vectors).

// 2x float divide in a 64-bit register.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// 4x float divide in a 128-bit register.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// 2x double divide in a 128-bit register.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16025 
16026 // --------------------------------- SQRT -------------------------------------
16027 
// 2x double square root in a 128-bit register (only the 2D form is defined).
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16039 
16040 // --------------------------------- ABS --------------------------------------
16041 
// Vector FP absolute-value rules (unary, float/double only).

// 2x float abs in a 64-bit register.
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// 4x float abs in a 128-bit register.
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// 2x double abs in a 128-bit register.
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16080 
16081 // --------------------------------- NEG --------------------------------------
16082 
// Vector FP negate rules (unary, float/double only).

// 2x float negate in a 64-bit register.
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// 4x float negate in a 128-bit register.
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// 2x double negate in a 128-bit register.
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16121 
16122 // --------------------------------- AND --------------------------------------
16123 
// Bitwise AND. Logical ops are type-agnostic, so the predicates test the
// byte length of the vector rather than the element count.

// 64-bit (or 32-bit) bitwise AND.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    // andr is the assembler's name for the SIMD AND (avoids the C++ keyword).
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 128-bit bitwise AND.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16152 
16153 // --------------------------------- OR ---------------------------------------
16154 
// 64-bit (or 32-bit) bitwise OR.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fix: format previously printed "and" although the encoding emits orr
  // (compare vor16B, which already prints "orr").
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16169 
// 128-bit bitwise OR.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16183 
16184 // --------------------------------- XOR --------------------------------------
16185 
// Bitwise XOR (emitted as the A64 eor instruction).

// 64-bit (or 32-bit) bitwise XOR.
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// 128-bit bitwise XOR.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16214 
16215 // ------------------------------ Shift ---------------------------------------
16216 
// Shift-count materialization: broadcast the scalar count into every byte
// lane of a vecX so the variable-shift (sshl/ushl) rules below can use it.

// Left-shift count: plain broadcast.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Right-shift count: broadcast, then negate each lane so sshl/ushl shift right.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16236 
// Variable-amount byte shifts. The shift operand is a per-lane count vector
// produced by vshiftcntL/vshiftcntR above; sshl handles both left and
// arithmetic-right (negative counts) so one rule matches LShiftVB and
// RShiftVB, while ushl covers the logical right shift (URShiftVB).

// 8x (or 4x) byte left / arithmetic-right shift.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16x byte left / arithmetic-right shift.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// 8x (or 4x) byte logical-right shift.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 16x byte logical-right shift.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16292 
// Immediate byte shifts. Java masks int shift counts to 0..31, hence the
// `& 31`; a byte lane only holds 8 bits, so counts >= 8 need special-casing.

// 8x (or 4x) byte left shift by immediate. A left shift of >= 8 clears every
// byte lane, which eor dst,src,src (dst = src ^ src = 0) produces directly.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 16x byte left shift by immediate (same zeroing trick as vsll8B_imm).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}

// 8x (or 4x) byte arithmetic right shift by immediate. Counts >= 8 are
// clamped to 7 (sign-fill, same result as Java's >> on a byte lane).
// NOTE(review): the `-sh & 7` remapping appears to be the shift encoding
// expected by the assembler's sshr helper — confirm against assembler_aarch64.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 16x byte arithmetic right shift by immediate (same clamping/remapping).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16362 
// Logical right shift of packed bytes (64-bit vector) by an immediate.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  // 4-byte and 8-byte vectors both fit in a D register.
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    // Java int-shift semantics: the count is masked to 5 bits.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // A logical byte shift by >= 8 yields 0, and USHR cannot encode a
      // byte-lane shift >= 8, so zero dst with eor(src, src) instead.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // Negate-and-mask matches this tree's ushr immediate-encoding
      // convention — NOTE(review): confirm against Assembler::ushr.
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16382 
// Logical right shift of 16 packed bytes (128-bit vector) by an immediate.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    // Java int-shift semantics: the count is masked to 5 bits.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      // Logical byte shift by >= 8 yields 0; zero dst via eor(src, src)
      // because USHR cannot encode a byte-lane shift >= 8.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // Negate-and-mask matches this tree's ushr immediate-encoding
      // convention — NOTE(review): confirm against Assembler::ushr.
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16401 
// Variable (register) shift of packed shorts in a 64-bit vector.
// SSHL shifts left for positive and right for negative lane counts, which
// is why both LShiftVS and RShiftVS map to the same instruction; the shift
// vector is presumably prepared with negated counts for right shifts —
// NOTE(review): confirm against the node that materializes the shift vector.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  // 2-element and 4-element short vectors both fit in a D register.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16416 
// Variable (register) shift of 8 packed shorts in a 128-bit vector.
// SSHL shifts left for positive and right for negative lane counts, so
// both LShiftVS and RShiftVS map here — NOTE(review): the shift vector is
// assumed to hold negated counts for right shifts; confirm.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16430 
// Variable (register) logical right shift of packed shorts, 64-bit vector.
// USHL shifts right for negative lane counts — the shift vector is assumed
// to hold negated counts here; NOTE(review): confirm.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  // 2-element and 4-element short vectors both fit in a D register.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16444 
// Variable (register) logical right shift of 8 packed shorts, 128-bit vector.
// USHL shifts right for negative lane counts — the shift vector is assumed
// to hold negated counts here; NOTE(review): confirm.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16457 
// Left shift of packed shorts (64-bit vector) by an immediate count.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  // 2-element and 4-element short vectors both fit in a D register.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Java int-shift semantics: the count is masked to 5 bits.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Shifting a short left by 16 or more yields 0, and SHL cannot
      // encode a 16-bit-lane shift >= 16, so zero dst via eor(src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16477 
// Left shift of 8 packed shorts (128-bit vector) by an immediate count.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    // Java int-shift semantics: the count is masked to 5 bits.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Shifting a short left by 16 or more yields 0, and SHL cannot
      // encode a 16-bit-lane shift >= 16, so zero dst via eor(src, src).
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16496 
// Arithmetic right shift of packed shorts (64-bit vector) by an immediate.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  // 2-element and 4-element short vectors both fit in a D register.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Java int-shift semantics: the count is masked to 5 bits.
    int sh = (int)$shift$$constant & 31;
    // Arithmetic short shift by >= 16 behaves like a shift by 15; clamp
    // because 16 is not an encodable 16-bit-lane shift amount.
    if (sh >= 16) sh = 15;
    // Negate-and-mask to match this tree's sshr immediate-encoding
    // convention — NOTE(review): confirm against Assembler::sshr.
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16512 
// Arithmetic right shift of 8 packed shorts (128-bit vector) by an immediate.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    // Java int-shift semantics: the count is masked to 5 bits.
    int sh = (int)$shift$$constant & 31;
    // Arithmetic short shift by >= 16 behaves like a shift by 15; clamp
    // because 16 is not an encodable 16-bit-lane shift amount.
    if (sh >= 16) sh = 15;
    // Negate-and-mask to match this tree's sshr immediate-encoding
    // convention — NOTE(review): confirm against Assembler::sshr.
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16527 
// Logical right shift of packed shorts (64-bit vector) by an immediate.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  // 2-element and 4-element short vectors both fit in a D register.
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    // Java int-shift semantics: the count is masked to 5 bits.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // A logical short shift by >= 16 yields 0; USHR cannot encode it,
      // so zero dst with eor(src, src) instead.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // Negate-and-mask matches this tree's ushr immediate-encoding
      // convention — NOTE(review): confirm against Assembler::ushr.
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16547 
// Logical right shift of 8 packed shorts (128-bit vector) by an immediate.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    // Java int-shift semantics: the count is masked to 5 bits.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // A logical short shift by >= 16 yields 0; USHR cannot encode it,
      // so zero dst with eor(src, src) instead.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      // Negate-and-mask matches this tree's ushr immediate-encoding
      // convention — NOTE(review): confirm against Assembler::ushr.
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16566 
// Variable (register) shift of 2 packed ints in a 64-bit vector.
// SSHL shifts left for positive and right for negative lane counts, so
// both LShiftVI and RShiftVI map here — NOTE(review): the shift vector is
// assumed to hold negated counts for right shifts; confirm.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16580 
// Variable (register) shift of 4 packed ints in a 128-bit vector.
// SSHL shifts left for positive and right for negative lane counts, so
// both LShiftVI and RShiftVI map here — NOTE(review): the shift vector is
// assumed to hold negated counts for right shifts; confirm.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16594 
// Variable (register) logical right shift of 2 packed ints, 64-bit vector.
// USHL shifts right for negative lane counts — the shift vector is assumed
// to hold negated counts here; NOTE(review): confirm.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16607 
// Variable (register) logical right shift of 4 packed ints, 128-bit vector.
// USHL shifts right for negative lane counts — the shift vector is assumed
// to hold negated counts here; NOTE(review): confirm.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16620 
// Left shift of 2 packed ints (64-bit vector) by an immediate count.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    // Java int-shift semantics mask the count to 5 bits; every value in
    // 0..31 is directly encodable for 32-bit lanes, so no clamping needed.
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16633 
// Left shift of 4 packed ints (128-bit vector) by an immediate count.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    // Java int-shift semantics mask the count to 5 bits; every value in
    // 0..31 is directly encodable for 32-bit lanes, so no clamping needed.
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16646 
// Arithmetic right shift of 2 packed ints (64-bit vector) by an immediate.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    // The 5-bit-masked count is passed negated to match this tree's sshr
    // immediate-encoding convention — NOTE(review): confirm against
    // Assembler::sshr.
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16659 
// Arithmetic right shift of 4 packed ints (128-bit vector) by an immediate.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    // The 5-bit-masked count is passed negated to match this tree's sshr
    // immediate-encoding convention — NOTE(review): confirm against
    // Assembler::sshr.
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16672 
// Logical right shift of 2 packed ints (64-bit vector) by an immediate.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    // The 5-bit-masked count is passed negated to match this tree's ushr
    // immediate-encoding convention — NOTE(review): confirm against
    // Assembler::ushr.
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16685 
// Logical right shift of 4 packed ints (128-bit vector) by an immediate.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    // The 5-bit-masked count is passed negated to match this tree's ushr
    // immediate-encoding convention — NOTE(review): confirm against
    // Assembler::ushr.
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16698 
// Variable (register) shift of 2 packed longs in a 128-bit vector.
// SSHL shifts left for positive and right for negative lane counts, so
// both LShiftVL and RShiftVL map here — NOTE(review): the shift vector is
// assumed to hold negated counts for right shifts; confirm.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16712 
// Variable (register) logical right shift of 2 packed longs, 128-bit vector.
// USHL shifts right for negative lane counts — the shift vector is assumed
// to hold negated counts here; NOTE(review): confirm.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16725 
// Left shift of 2 packed longs (128-bit vector) by an immediate count.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // Java long-shift semantics mask the count to 6 bits; every value in
    // 0..63 is directly encodable for 64-bit lanes, so no clamping needed.
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16738 
// Arithmetic right shift of 2 packed longs (128-bit vector) by an immediate.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // The 6-bit-masked count is passed negated to match this tree's sshr
    // immediate-encoding convention — NOTE(review): confirm against
    // Assembler::sshr.
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16751 
// Logical right shift of 2 packed longs (128-bit vector) by an immediate.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    // The 6-bit-masked count is passed negated to match this tree's ushr
    // immediate-encoding convention — NOTE(review): confirm against
    // Assembler::ushr.
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16764 
16765 //----------PEEPHOLE RULES-----------------------------------------------------
16766 // These must follow all instruction definitions as they use the names
16767 // defined in the instructions definitions.
16768 //
16769 // peepmatch ( root_instr_name [preceding_instruction]* );
16770 //
16771 // peepconstraint %{
16772 // (instruction_number.operand_name relational_op instruction_number.operand_name
16773 //  [, ...] );
16774 // // instruction numbers are zero-based using left to right order in peepmatch
16775 //
16776 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16777 // // provide an instruction_number.operand_name for each operand that appears
16778 // // in the replacement instruction's match rule
16779 //
16780 // ---------VM FLAGS---------------------------------------------------------
16781 //
16782 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16783 //
16784 // Each peephole rule is given an identifying number starting with zero and
16785 // increasing by one in the order seen by the parser.  An individual peephole
16786 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16787 // on the command-line.
16788 //
16789 // ---------CURRENT LIMITATIONS----------------------------------------------
16790 //
16791 // Only match adjacent instructions in same basic block
16792 // Only equality constraints
16793 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16794 // Only one replacement instruction
16795 //
16796 // ---------EXAMPLE----------------------------------------------------------
16797 //
16798 // // pertinent parts of existing instructions in architecture description
16799 // instruct movI(iRegINoSp dst, iRegI src)
16800 // %{
16801 //   match(Set dst (CopyI src));
16802 // %}
16803 //
16804 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16805 // %{
16806 //   match(Set dst (AddI dst src));
16807 //   effect(KILL cr);
16808 // %}
16809 //
16810 // // Change (inc mov) to lea
16811 // peephole %{
//   // increment preceded by register-register move
16813 //   peepmatch ( incI_iReg movI );
16814 //   // require that the destination register of the increment
16815 //   // match the destination register of the move
16816 //   peepconstraint ( 0.dst == 1.dst );
16817 //   // construct a replacement instruction that sets
16818 //   // the destination to ( move's source register + one )
16819 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16820 // %}
16821 //
16822 
16823 // Implementation no longer uses movX instructions since
16824 // machine-independent system no longer uses CopyX nodes.
16825 //
16826 // peephole
16827 // %{
16828 //   peepmatch (incI_iReg movI);
16829 //   peepconstraint (0.dst == 1.dst);
16830 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16831 // %}
16832 
16833 // peephole
16834 // %{
16835 //   peepmatch (decI_iReg movI);
16836 //   peepconstraint (0.dst == 1.dst);
16837 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16838 // %}
16839 
16840 // peephole
16841 // %{
16842 //   peepmatch (addI_iReg_imm movI);
16843 //   peepconstraint (0.dst == 1.dst);
16844 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16845 // %}
16846 
16847 // peephole
16848 // %{
16849 //   peepmatch (incL_iReg movL);
16850 //   peepconstraint (0.dst == 1.dst);
16851 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16852 // %}
16853 
16854 // peephole
16855 // %{
16856 //   peepmatch (decL_iReg movL);
16857 //   peepconstraint (0.dst == 1.dst);
16858 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16859 // %}
16860 
16861 // peephole
16862 // %{
16863 //   peepmatch (addL_iReg_imm movL);
16864 //   peepconstraint (0.dst == 1.dst);
16865 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16866 // %}
16867 
16868 // peephole
16869 // %{
16870 //   peepmatch (addP_iReg_imm movP);
16871 //   peepconstraint (0.dst == 1.dst);
16872 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16873 // %}
16874 
16875 // // Change load of spilled value to only a spill
16876 // instruct storeI(memory mem, iRegI src)
16877 // %{
16878 //   match(Set mem (StoreI mem src));
16879 // %}
16880 //
16881 // instruct loadI(iRegINoSp dst, memory mem)
16882 // %{
16883 //   match(Set dst (LoadI mem));
16884 // %}
16885 //
16886 
16887 //----------SMARTSPILL RULES---------------------------------------------------
16888 // These must follow all instruction definitions as they use the names
16889 // defined in the instructions definitions.
16890 
16891 // Local Variables:
16892 // mode: c++
16893 // End: