1 //
   2 // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
  29 // This information is used by the matcher and the register allocator to
  30 // describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
  70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  72 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  73 //
// As regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// r0-r7: argument/result registers -- caller save for both Java and C.
// Each 64-bit register is described as a real low half plus a virtual
// _H high half, as explained in the header comment above.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// r10-r18: caller-save temporaries.  Note that r8 and r9 have no
// reg_def at all -- they are kept invisible to the allocator so they
// can be used as scratch registers (see header comment).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: callee save (SOE) in the C convention but caller save (SOC)
// for Java -- see the header comment on why Java avoids callee saves.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: dedicated registers (NS = no save, never allocated by the
// register allocator); their roles are noted on each line.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always treated as save-on-call
// (even though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // v0-v7: FP/SIMD argument and result registers (SOC in both
  // conventions).  Each 128-bit vector register is described as four
  // 32-bit slots: base, _H, _J, _K (VMReg slots 0..3).
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 203 
  // v8-v15: callee save in the platform ABI, but deliberately declared
  // SOC here so Java code never relies on them surviving a call (see
  // the comment preceding these definitions).
  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 243 
  // v16-v31: caller-save temporaries, SOC in both conventions as per
  // the platform spec.
  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 condition flags (NZCV, historically part of the CPSR) are
// not directly accessible as an instruction operand. The FPSR status
// flag register is a system register which can be written/read using
// MSR/MRS but again does not appear as an operand (a code identifying
// the FPSR occurs as an immediate value in the instruction).
 333 
// Condition-flags pseudo register: caller save, not backed by a real
// VMReg (VMRegImpl::Bad()), encoding 32 (just past the 32 GPRs).
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// Integer register allocation order.  Listing order is allocation
// priority: scratch/temporary registers first, then argument
// registers, then (Java-)callee-saved ones, and finally the dedicated
// registers, which are never allocated.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Float/vector register allocation order: the freely-clobberable
// v16-v31 first, then the FP argument registers v0-v7, then v8-v15
// (callee save in the C ABI) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
// The flags pseudo register lives in its own allocation chunk.
alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP which will
// never be used as an integer register.  R8 and R9 have no reg_def at
// all (scratch registers, see the header comment), so they cannot
// appear here either.
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton classes: used where an instruction or calling convention
// pins an operand to one specific register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP, i.e. R31)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers: excludes the dedicated
// registers R27-R31 (listed, commented out, at the bottom for
// reference).  Variant intended for use when the frame pointer need
// not be preserved.
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// Variant intended for use when the frame pointer IS preserved.
// NOTE(review): this list is currently identical to
// no_special_reg32_no_fp (R29/fp is commented out in both), so the
// reg_class_dynamic selection below has no effect -- confirm against
// upstream whether one variant should include R29.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
// Selects between the two variants above based on PreserveFramePointer
// (ADLC reg_class_dynamic); see NOTE above -- both variants are
// currently the same.
reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers: 64-bit counterpart
// of no_special_reg32_no_fp (each entry paired with its _H half).
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// 64-bit counterpart of no_special_reg32_with_fp.
// NOTE(review): identical to no_special_reg_no_fp (R29/fp commented
// out in both), so the dynamic selection below is currently a no-op --
// confirm against upstream.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
// Selects between the two 64-bit variants above based on
// PreserveFramePointer (ADLC reg_class_dynamic).
reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Fixed single-register 64-bit classes, used by operands/encodings
// that require one particular register (each 64-bit register is its
// low slot plus the virtual _H half).

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers: every defined GPR including the
// dedicated ones (heapbase, thread, fp, lr, sp).
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers: as ptr_reg but with the
// dedicated registers R27-R31 excluded (shown commented out below).
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers: single-precision values occupy only
// the first 32-bit slot of each vector register.
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers: each entry is the low slot plus its
// _H half (two 32-bit slots = 64 bits).
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers: two 32-bit slots per register
// (same slot layout as double_reg).
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers
// (four allocator slots per register: base, _H, _J and _K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Class for 128 bit register v0
// NOTE(review): only the V0/V0_H slot pair is listed here, unlike
// vectorx_reg above which also lists the _J/_K slots -- confirm the
// "128 bit" wording is intentional for these singleton classes.
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for condition codes (the flags register)
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches and calls are costed at twice a plain register op.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references are costed an order of magnitude higher.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
1000 class CallStubImpl {
1001 
1002   //--------------------------------------------------------------
1003   //---<  Used for optimization in Compile::shorten_branches  >---
1004   //--------------------------------------------------------------
1005 
1006  public:
1007   // Size of call trampoline stub.
1008   static uint size_call_trampoline() {
1009     return 0; // no call trampolines on this platform
1010   }
1011 
1012   // number of relocations needed by a call trampoline stub
1013   static uint reloc_call_trampoline() {
1014     return 0; // no call trampolines on this platform
1015   }
1016 };
1017 
class HandlerImpl {

 public:

  // emitters for the exception and deopt handler stubs; each returns
  // the offset in cbuf at which the handler was emitted (implemented
  // outside this class)
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case code size reserved for the exception handler: one far
  // branch to the exception blob
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): four instruction slots are reserved although the
    // comment counts two instructions -- presumably the far branch can
    // expand; confirm against MacroAssembler::far_branch_size().
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers used by the predicates below to walk the
  // ideal graph between membar nodes (implementations follow in the
  // source block)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  MemBarNode *leading_to_trailing(MemBarNode *leading);
  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1064 %}
1065 
1066 source %{
1067 
1068   // Optimizaton of volatile gets and puts
1069   // -------------------------------------
1070   //
1071   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1072   // use to implement volatile reads and writes. For a volatile read
1073   // we simply need
1074   //
1075   //   ldar<x>
1076   //
1077   // and for a volatile write we need
1078   //
1079   //   stlr<x>
1080   //
1081   // Alternatively, we can implement them by pairing a normal
1082   // load/store with a memory barrier. For a volatile read we need
1083   //
1084   //   ldr<x>
1085   //   dmb ishld
1086   //
1087   // for a volatile write
1088   //
1089   //   dmb ish
1090   //   str<x>
1091   //   dmb ish
1092   //
1093   // We can also use ldaxr and stlxr to implement compare and swap CAS
1094   // sequences. These are normally translated to an instruction
1095   // sequence like the following
1096   //
1097   //   dmb      ish
1098   // retry:
1099   //   ldxr<x>   rval raddr
1100   //   cmp       rval rold
1101   //   b.ne done
1102   //   stlxr<x>  rval, rnew, rold
1103   //   cbnz      rval retry
1104   // done:
1105   //   cset      r0, eq
1106   //   dmb ishld
1107   //
1108   // Note that the exclusive store is already using an stlxr
1109   // instruction. That is required to ensure visibility to other
1110   // threads of the exclusive write (assuming it succeeds) before that
1111   // of any subsequent writes.
1112   //
1113   // The following instruction sequence is an improvement on the above
1114   //
1115   // retry:
1116   //   ldaxr<x>  rval raddr
1117   //   cmp       rval rold
1118   //   b.ne done
1119   //   stlxr<x>  rval, rnew, rold
1120   //   cbnz      rval retry
1121   // done:
1122   //   cset      r0, eq
1123   //
1124   // We don't need the leading dmb ish since the stlxr guarantees
1125   // visibility of prior writes in the case that the swap is
1126   // successful. Crucially we don't have to worry about the case where
1127   // the swap is not successful since no valid program should be
1128   // relying on visibility of prior changes by the attempting thread
1129   // in the case where the CAS fails.
1130   //
1131   // Similarly, we don't need the trailing dmb ishld if we substitute
1132   // an ldaxr instruction since that will provide all the guarantees we
1133   // require regarding observation of changes made by other threads
1134   // before any change to the CAS address observed by the load.
1135   //
1136   // In order to generate the desired instruction sequence we need to
1137   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1139   // writes or CAS operations and ii) do not occur through any other
1140   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1142   // sequences to the desired machine code sequences. Selection of the
1143   // alternative rules can be implemented by predicates which identify
1144   // the relevant node sequences.
1145   //
1146   // The ideal graph generator translates a volatile read to the node
1147   // sequence
1148   //
1149   //   LoadX[mo_acquire]
1150   //   MemBarAcquire
1151   //
1152   // As a special case when using the compressed oops optimization we
1153   // may also see this variant
1154   //
1155   //   LoadN[mo_acquire]
1156   //   DecodeN
1157   //   MemBarAcquire
1158   //
1159   // A volatile write is translated to the node sequence
1160   //
1161   //   MemBarRelease
1162   //   StoreX[mo_release] {CardMark}-optional
1163   //   MemBarVolatile
1164   //
1165   // n.b. the above node patterns are generated with a strict
1166   // 'signature' configuration of input and output dependencies (see
1167   // the predicates below for exact details). The card mark may be as
1168   // simple as a few extra nodes or, in a few GC configurations, may
1169   // include more complex control flow between the leading and
1170   // trailing memory barriers. However, whatever the card mark
1171   // configuration these signatures are unique to translated volatile
1172   // reads/stores -- they will not appear as a result of any other
1173   // bytecode translation or inlining nor as a consequence of
1174   // optimizing transforms.
1175   //
1176   // We also want to catch inlined unsafe volatile gets and puts and
1177   // be able to implement them using either ldar<x>/stlr<x> or some
1178   // combination of ldr<x>/stlr<x> and dmb instructions.
1179   //
1180   // Inlined unsafe volatiles puts manifest as a minor variant of the
1181   // normal volatile put node sequence containing an extra cpuorder
1182   // membar
1183   //
1184   //   MemBarRelease
1185   //   MemBarCPUOrder
1186   //   StoreX[mo_release] {CardMark}-optional
1187   //   MemBarVolatile
1188   //
1189   // n.b. as an aside, the cpuorder membar is not itself subject to
1190   // matching and translation by adlc rules.  However, the rule
1191   // predicates need to detect its presence in order to correctly
1192   // select the desired adlc rules.
1193   //
1194   // Inlined unsafe volatile gets manifest as a somewhat different
1195   // node sequence to a normal volatile get
1196   //
1197   //   MemBarCPUOrder
1198   //        ||       \\
1199   //   MemBarAcquire LoadX[mo_acquire]
1200   //        ||
1201   //   MemBarCPUOrder
1202   //
1203   // In this case the acquire membar does not directly depend on the
1204   // load. However, we can be sure that the load is generated from an
1205   // inlined unsafe volatile get if we see it dependent on this unique
1206   // sequence of membar nodes. Similarly, given an acquire membar we
1207   // can know that it was added because of an inlined unsafe volatile
1208   // get if it is fed and feeds a cpuorder membar and if its feed
1209   // membar also feeds an acquiring load.
1210   //
1211   // Finally an inlined (Unsafe) CAS operation is translated to the
1212   // following ideal graph
1213   //
1214   //   MemBarRelease
1215   //   MemBarCPUOrder
1216   //   CompareAndSwapX {CardMark}-optional
1217   //   MemBarCPUOrder
1218   //   MemBarAcquire
1219   //
1220   // So, where we can identify these volatile read and write
1221   // signatures we can choose to plant either of the above two code
1222   // sequences. For a volatile read we can simply plant a normal
1223   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1224   // also choose to inhibit translation of the MemBarAcquire and
1225   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1226   //
1227   // When we recognise a volatile store signature we can choose to
1228   // plant at a dmb ish as a translation for the MemBarRelease, a
1229   // normal str<x> and then a dmb ish for the MemBarVolatile.
1230   // Alternatively, we can inhibit translation of the MemBarRelease
1231   // and MemBarVolatile and instead plant a simple stlr<x>
1232   // instruction.
1233   //
1234   // when we recognise a CAS signature we can choose to plant a dmb
1235   // ish as a translation for the MemBarRelease, the conventional
1236   // macro-instruction sequence for the CompareAndSwap node (which
1237   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1238   // Alternatively, we can elide generation of the dmb instructions
1239   // and plant the alternative CompareAndSwap macro-instruction
1240   // sequence (which uses ldaxr<x>).
1241   //
1242   // Of course, the above only applies when we see these signature
1243   // configurations. We still want to plant dmb instructions in any
1244   // other cases where we may see a MemBarAcquire, MemBarRelease or
1245   // MemBarVolatile. For example, at the end of a constructor which
1246   // writes final/volatile fields we will see a MemBarRelease
1247   // instruction and this needs a 'dmb ish' lest we risk the
1248   // constructed object being visible without making the
1249   // final/volatile field writes visible.
1250   //
1251   // n.b. the translation rules below which rely on detection of the
1252   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1253   // If we see anything other than the signature configurations we
1254   // always just translate the loads and stores to ldr<x> and str<x>
1255   // and translate acquire, release and volatile membars to the
1256   // relevant dmb instructions.
1257   //
1258 
1259   // graph traversal helpers used for volatile put/get and CAS
1260   // optimization
1261 
1262   // 1) general purpose helpers
1263 
1264   // if node n is linked to a parent MemBarNode by an intervening
1265   // Control and Memory ProjNode return the MemBarNode otherwise return
1266   // NULL.
1267   //
1268   // n may only be a Load or a MemBar.
1269 
1270   MemBarNode *parent_membar(const Node *n)
1271   {
1272     Node *ctl = NULL;
1273     Node *mem = NULL;
1274     Node *membar = NULL;
1275 
1276     if (n->is_Load()) {
1277       ctl = n->lookup(LoadNode::Control);
1278       mem = n->lookup(LoadNode::Memory);
1279     } else if (n->is_MemBar()) {
1280       ctl = n->lookup(TypeFunc::Control);
1281       mem = n->lookup(TypeFunc::Memory);
1282     } else {
1283         return NULL;
1284     }
1285 
1286     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1287       return NULL;
1288     }
1289 
1290     membar = ctl->lookup(0);
1291 
1292     if (!membar || !membar->is_MemBar()) {
1293       return NULL;
1294     }
1295 
1296     if (mem->lookup(0) != membar) {
1297       return NULL;
1298     }
1299 
1300     return membar->as_MemBar();
1301   }
1302 
1303   // if n is linked to a child MemBarNode by intervening Control and
1304   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1305 
1306   MemBarNode *child_membar(const MemBarNode *n)
1307   {
1308     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1309     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1310 
1311     // MemBar needs to have both a Ctl and Mem projection
1312     if (! ctl || ! mem)
1313       return NULL;
1314 
1315     MemBarNode *child = NULL;
1316     Node *x;
1317 
1318     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1319       x = ctl->fast_out(i);
1320       // if we see a membar we keep hold of it. we may also see a new
1321       // arena copy of the original but it will appear later
1322       if (x->is_MemBar()) {
1323           child = x->as_MemBar();
1324           break;
1325       }
1326     }
1327 
1328     if (child == NULL) {
1329       return NULL;
1330     }
1331 
1332     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1333       x = mem->fast_out(i);
1334       // if we see a membar we keep hold of it. we may also see a new
1335       // arena copy of the original but it will appear later
1336       if (x == child) {
1337         return child;
1338       }
1339     }
1340     return NULL;
1341   }
1342 
1343   // helper predicate use to filter candidates for a leading memory
1344   // barrier
1345   //
1346   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1347   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1348 
1349   bool leading_membar(const MemBarNode *barrier)
1350   {
1351     int opcode = barrier->Opcode();
1352     // if this is a release membar we are ok
1353     if (opcode == Op_MemBarRelease) {
1354       return true;
1355     }
1356     // if its a cpuorder membar . . .
1357     if (opcode != Op_MemBarCPUOrder) {
1358       return false;
1359     }
1360     // then the parent has to be a release membar
1361     MemBarNode *parent = parent_membar(barrier);
1362     if (!parent) {
1363       return false;
1364     }
1365     opcode = parent->Opcode();
1366     return opcode == Op_MemBarRelease;
1367   }
1368 
1369   // 2) card mark detection helper
1370 
1371   // helper predicate which can be used to detect a volatile membar
1372   // introduced as part of a conditional card mark sequence either by
1373   // G1 or by CMS when UseCondCardMark is true.
1374   //
1375   // membar can be definitively determined to be part of a card mark
1376   // sequence if and only if all the following hold
1377   //
1378   // i) it is a MemBarVolatile
1379   //
1380   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1381   // true
1382   //
1383   // iii) the node's Mem projection feeds a StoreCM node.
1384 
1385   bool is_card_mark_membar(const MemBarNode *barrier)
1386   {
1387     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1388       return false;
1389     }
1390 
1391     if (barrier->Opcode() != Op_MemBarVolatile) {
1392       return false;
1393     }
1394 
1395     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1396 
1397     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1398       Node *y = mem->fast_out(i);
1399       if (y->Opcode() == Op_StoreCM) {
1400         return true;
1401       }
1402     }
1403 
1404     return false;
1405   }
1406 
1407 
1408   // 3) helper predicates to traverse volatile put or CAS graphs which
1409   // may contain GC barrier subgraphs
1410 
1411   // Preamble
1412   // --------
1413   //
1414   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1416   // leading MemBarRelease and a trailing MemBarVolatile as follows
1417   //
1418   //   MemBarRelease
1419   //  {    ||        } -- optional
1420   //  {MemBarCPUOrder}
1421   //       ||       \\
1422   //       ||     StoreX[mo_release]
1423   //       | \ Bot    / ???
1424   //       | MergeMem
1425   //       | /
1426   //   MemBarVolatile
1427   //
1428   // where
1429   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1430   //  | \ and / indicate further routing of the Ctl and Mem feeds
1431   //
1432   // Note that the memory feed from the CPUOrder membar to the
1433   // MergeMem node is an AliasIdxBot slice while the feed from the
1434   // StoreX is for a slice determined by the type of value being
1435   // written.
1436   //
1437   // the diagram above shows the graph we see for non-object stores.
1438   // for a volatile Object store (StoreN/P) we may see other nodes
1439   // below the leading membar because of the need for a GC pre- or
1440   // post-write barrier.
1441   //
  // with most GC configurations we will see this simple variant which
1443   // includes a post-write barrier card mark.
1444   //
1445   //   MemBarRelease______________________________
1446   //         ||    \\               Ctl \        \\
1447   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1448   //         | \ Bot  / oop                 . . .  /
1449   //         | MergeMem
1450   //         | /
1451   //         ||      /
1452   //   MemBarVolatile
1453   //
1454   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1455   // the object address to an int used to compute the card offset) and
1456   // Ctl+Mem to a StoreB node (which does the actual card mark).
1457   //
1458   // n.b. a StoreCM node is only ever used when CMS (with or without
1459   // CondCardMark) or G1 is configured. This abstract instruction
1460   // differs from a normal card mark write (StoreB) because it implies
1461   // a requirement to order visibility of the card mark (StoreCM)
1462   // after that of the object put (StoreP/N) using a StoreStore memory
1463   // barrier. Note that this is /not/ a requirement to order the
1464   // instructions in the generated code (that is already guaranteed by
1465   // the order of memory dependencies). Rather it is a requirement to
1466   // ensure visibility order which only applies on architectures like
1467   // AArch64 which do not implement TSO. This ordering is required for
1468   // both non-volatile and volatile puts.
1469   //
1470   // That implies that we need to translate a StoreCM using the
1471   // sequence
1472   //
1473   //   dmb ishst
1474   //   stlrb
1475   //
1476   // This dmb cannot be omitted even when the associated StoreX or
1477   // CompareAndSwapX is implemented using stlr. However, as described
1478   // below there are circumstances where a specific GC configuration
1479   // requires a stronger barrier in which case it can be omitted.
1480   // 
1481   // With the Serial or Parallel GC using +CondCardMark the card mark
1482   // is performed conditionally on it currently being unmarked in
1483   // which case the volatile put graph looks slightly different
1484   //
1485   //   MemBarRelease____________________________________________
1486   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1487   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1488   //         | \ Bot / oop                          \            |
1489   //         | MergeMem                            . . .      StoreB
1490   //         | /                                                /
1491   //         ||     /
1492   //   MemBarVolatile
1493   //
1494   // It is worth noting at this stage that all the above
1495   // configurations can be uniquely identified by checking that the
1496   // memory flow includes the following subgraph:
1497   //
1498   //   MemBarRelease
1499   //  {MemBarCPUOrder}
1500   //      |  \      . . .
1501   //      |  StoreX[mo_release]  . . .
1502   //  Bot |   / oop
1503   //     MergeMem
1504   //      |
1505   //   MemBarVolatile
1506   //
1507   // This is referred to as a *normal* volatile store subgraph. It can
1508   // easily be detected starting from any candidate MemBarRelease,
1509   // StoreX[mo_release] or MemBarVolatile node.
1510   //
1511   // A small variation on this normal case occurs for an unsafe CAS
1512   // operation. The basic memory flow subgraph for a non-object CAS is
1513   // as follows
1514   //
1515   //   MemBarRelease
1516   //         ||
1517   //   MemBarCPUOrder
1518   //          |     \\   . . .
1519   //          |     CompareAndSwapX
1520   //          |       |
1521   //      Bot |     SCMemProj
1522   //           \     / Bot
1523   //           MergeMem
1524   //           /
1525   //   MemBarCPUOrder
1526   //         ||
1527   //   MemBarAcquire
1528   //
1529   // The same basic variations on this arrangement (mutatis mutandis)
1530   // occur when a card mark is introduced. i.e. the CPUOrder MemBar
1531   // feeds the extra CastP2X, LoadB etc nodes but the above memory
1532   // flow subgraph is still present.
1533   // 
1534   // This is referred to as a *normal* CAS subgraph. It can easily be
1535   // detected starting from any candidate MemBarRelease,
1536   // StoreX[mo_release] or MemBarAcquire node.
1537   //
1538   // The code below uses two helper predicates, leading_to_trailing
1539   // and trailing_to_leading to identify these normal graphs, one
1540   // validating the layout starting from the top membar and searching
1541   // down and the other validating the layout starting from the lower
1542   // membar and searching up.
1543   //
1544   // There are two special case GC configurations when the simple
1545   // normal graphs above may not be generated: when using G1 (which
1546   // always employs a conditional card mark); and when using CMS with
1547   // conditional card marking (+CondCardMark) configured. These GCs
1548   // are both concurrent rather than stop-the world GCs. So they
1549   // introduce extra Ctl+Mem flow into the graph between the leading
1550   // and trailing membar nodes, in particular enforcing stronger
  // memory serialisation between the object put and the corresponding
1552   // conditional card mark. CMS employs a post-write GC barrier while
1553   // G1 employs both a pre- and post-write GC barrier.
1554   //
1555   // The post-write barrier subgraph for these configurations includes
1556   // a MemBarVolatile node -- referred to as a card mark membar --
1557   // which is needed to order the card write (StoreCM) operation in
1558   // the barrier, the preceding StoreX (or CompareAndSwapX) and Store
1559   // operations performed by GC threads i.e. a card mark membar
1560   // constitutes a StoreLoad barrier hence must be translated to a dmb
1561   // ish (whether or not it sits inside a volatile store sequence).
1562   //
1563   // Of course, the use of the dmb ish for the card mark membar also
  // implies that the StoreCM which follows can omit the dmb ishst
1565   // instruction. The necessary visibility ordering will already be
1566   // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
  // needs to be generated as part of the StoreCM sequence with GC
1568   // configuration +CMS -CondCardMark.
1569   // 
1570   // Of course all these extra barrier nodes may well be absent --
1571   // they are only inserted for object puts. Their potential presence
1572   // significantly complicates the task of identifying whether a
1573   // MemBarRelease, StoreX[mo_release], MemBarVolatile or
1574   // MemBarAcquire forms part of a volatile put or CAS when using
1575   // these GC configurations (see below) and also complicates the
1576   // decision as to how to translate a MemBarVolatile and StoreCM.
1577   //
  // So, this means that a card mark MemBarVolatile occurring in the
  // post-barrier graph needs to be distinguished from a normal
1580   // trailing MemBarVolatile. Resolving this is straightforward: a
1581   // card mark MemBarVolatile always projects a Mem feed to a StoreCM
1582   // node and that is a unique marker
1583   //
1584   //      MemBarVolatile (card mark)
1585   //       C |    \     . . .
1586   //         |   StoreCM   . . .
1587   //       . . .
1588   //
1589   // Returning to the task of translating the object put and the
1590   // leading/trailing membar nodes: what do the node graphs look like
1591   // for these 2 special cases? and how can we determine the status of
1592   // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
1593   // normal and non-normal cases?
1594   //
1595   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1597   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1598   // intervening StoreLoad barrier (MemBarVolatile).
1599   //
1600   // So, with CMS we may see a node graph for a volatile object store
1601   // which looks like this
1602   //
1603   //   MemBarRelease
1604   //   MemBarCPUOrder_(leading)____________________
1605   //     C |  | M \       \\               M |   C \
1606   //       |  |    \    StoreN/P[mo_release] |  CastP2X
1607   //       |  | Bot \    / oop      \        |
1608   //       |  |    MergeMem          \      / 
1609   //       |  |      /                |    /
1610   //     MemBarVolatile (card mark)   |   /
1611   //     C |  ||    M |               |  /
1612   //       | LoadB    | Bot       oop | / Bot
1613   //       |   |      |              / /
1614   //       | Cmp      |\            / /
1615   //       | /        | \          / /
1616   //       If         |  \        / /
1617   //       | \        |   \      / /
1618   // IfFalse  IfTrue  |    \    / /
1619   //       \     / \  |    |   / /
1620   //        \   / StoreCM  |  / /
1621   //         \ /      \   /  / /
1622   //        Region     Phi  / /
1623   //          | \   Raw |  / /
1624   //          |  . . .  | / /
1625   //          |       MergeMem
1626   //          |           |
1627   //        MemBarVolatile (trailing)
1628   //
1629   // Notice that there are two MergeMem nodes below the leading
1630   // membar. The first MergeMem merges the AliasIdxBot Mem slice from
1631   // the leading membar and the oopptr Mem slice from the Store into
1632   // the card mark membar. The trailing MergeMem merges the
1633   // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
1634   // slice from the StoreCM and an oop slice from the StoreN/P node
1635   // into the trailing membar (n.b. the raw slice proceeds via a Phi
1636   // associated with the If region).
1637   //
1638   // So, in the case of CMS + CondCardMark the volatile object store
1639   // graph still includes a normal volatile store subgraph from the
1640   // leading membar to the trailing membar. However, it also contains
1641   // the same shape memory flow to the card mark membar. The two flows
1642   // can be distinguished by testing whether or not the downstream
1643   // membar is a card mark membar.
1644   //
1645   // The graph for a CAS also varies with CMS + CondCardMark, in
1646   // particular employing a control feed from the CompareAndSwapX node
1647   // through a CmpI and If to the card mark membar and StoreCM which
1648   // updates the associated card. This avoids executing the card mark
1649   // if the CAS fails. However, it can be seen from the diagram below
1650   // that the presence of the barrier does not alter the normal CAS
1651   // memory subgraph where the leading membar feeds a CompareAndSwapX,
1652   // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
1653   // MemBarAcquire pair.
1654   //
1655   //   MemBarRelease
1656   //   MemBarCPUOrder__(leading)_______________________
1657   //   C /  M |                        \\            C \
1658   //  . . .   | Bot                CompareAndSwapN/P   CastP2X
1659   //          |                  C /  M |
1660   //          |                 CmpI    |
1661   //          |                  /      |
1662   //          |               . . .     |
1663   //          |              IfTrue     |
1664   //          |              /          |
1665   //       MemBarVolatile (card mark)   |
1666   //        C |  ||    M |              |
1667   //          | LoadB    | Bot   ______/|
1668   //          |   |      |      /       |
1669   //          | Cmp      |     /      SCMemProj
1670   //          | /        |    /         |
1671   //          If         |   /         /
1672   //          | \        |  /         / Bot
1673   //     IfFalse  IfTrue | /         /
1674   //          |   / \   / / prec    /
1675   //   . . .  |  /  StoreCM        /
1676   //        \ | /      | raw      /
1677   //        Region    . . .      /
1678   //           | \              /
1679   //           |   . . .   \    / Bot
1680   //           |        MergeMem
1681   //           |          /
1682   //         MemBarCPUOrder
1683   //         MemBarAcquire (trailing)
1684   //
1685   // This has a slightly different memory subgraph to the one seen
1686   // previously but the core of it has a similar memory flow to the
1687   // CAS normal subgraph:
1688   //
1689   //   MemBarRelease
1690   //   MemBarCPUOrder____
1691   //         |          \      . . .
1692   //         |       CompareAndSwapX  . . .
1693   //         |       C /  M |
1694   //         |      CmpI    |
1695   //         |       /      |
1696   //         |      . .    /
1697   //     Bot |   IfTrue   /
1698   //         |   /       /
1699   //    MemBarVolatile  /
1700   //         | ...     /
1701   //      StoreCM ... /
1702   //         |       / 
1703   //       . . .  SCMemProj
1704   //      Raw \    / Bot
1705   //        MergeMem
1706   //           |
1707   //   MemBarCPUOrder
1708   //   MemBarAcquire
1709   //
1710   // The G1 graph for a volatile object put is a lot more complicated.
1711   // Nodes inserted on behalf of G1 may comprise: a pre-write graph
1712   // which adds the old value to the SATB queue; the releasing store
1713   // itself; and, finally, a post-write graph which performs a card
1714   // mark.
1715   //
1716   // The pre-write graph may be omitted, but only when the put is
1717   // writing to a newly allocated (young gen) object and then only if
1718   // there is a direct memory chain to the Initialize node for the
1719   // object allocation. This will not happen for a volatile put since
1720   // any memory chain passes through the leading membar.
1721   //
1722   // The pre-write graph includes a series of 3 If tests. The outermost
1723   // If tests whether SATB is enabled (no else case). The next If tests
1724   // whether the old value is non-NULL (no else case). The third tests
1725   // whether the SATB queue index is > 0, if so updating the queue. The
1726   // else case for this third If calls out to the runtime to allocate a
1727   // new queue buffer.
1728   //
1729   // So with G1 the pre-write and releasing store subgraph looks like
1730   // this (the nested Ifs are omitted).
1731   //
1732   //  MemBarRelease (leading)____________
1733   //     C |  ||  M \   M \    M \  M \ . . .
1734   //       | LoadB   \  LoadL  LoadN   \
1735   //       | /        \                 \
1736   //       If         |\                 \
1737   //       | \        | \                 \
1738   //  IfFalse  IfTrue |  \                 \
1739   //       |     |    |   \                 |
1740   //       |     If   |   /\                |
1741   //       |     |          \               |
1742   //       |                 \              |
1743   //       |    . . .         \             |
1744   //       | /       | /       |            |
1745   //      Region  Phi[M]       |            |
1746   //       | \       |         |            |
1747   //       |  \_____ | ___     |            |
1748   //     C | C \     |   C \ M |            |
1749   //       | CastP2X | StoreN/P[mo_release] |
1750   //       |         |         |            |
1751   //     C |       M |       M |          M |
1752   //        \        | Raw     | oop       / Bot
1753   //                  . . .
1754   //          (post write subtree elided)
1755   //                    . . .
1756   //             C \         M /
1757   //         MemBarVolatile (trailing)
1758   //
1759   // Note that the three memory feeds into the post-write tree are an
1760   // AliasRawIdx slice associated with the writes in the pre-write
1761   // tree, an oop type slice from the StoreX specific to the type of
1762   // the volatile field and the AliasBotIdx slice emanating from the
1763   // leading membar.
1764   //
1765   // n.b. the LoadB in this subgraph is not the card read -- it's a
1766   // read of the SATB queue active flag.
1767   //
1768   // The CAS graph is once again a variant of the above with a
1769   // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
1770   // value from the CompareAndSwapX node is fed into the post-write
  // graph along with the AliasIdxRaw feed from the pre-barrier and
  // the AliasIdxBot feeds from the leading membar and the SCMemProj.
1773   //
1774   //  MemBarRelease (leading)____________
1775   //     C |  ||  M \   M \    M \  M \ . . .
1776   //       | LoadB   \  LoadL  LoadN   \
1777   //       | /        \                 \
1778   //       If         |\                 \
1779   //       | \        | \                 \
1780   //  IfFalse  IfTrue |  \                 \
1781   //       |     |    |   \                 \
1782   //       |     If   |    \                 |
1783   //       |     |          \                |
1784   //       |                 \               |
1785   //       |    . . .         \              |
1786   //       | /       | /       \             |
1787   //      Region  Phi[M]        \            |
1788   //       | \       |           \           |
1789   //       |  \_____ |            |          |
1790   //     C | C \     |            |          |
1791   //       | CastP2X |     CompareAndSwapX   |
1792   //       |         |   res |     |         |
1793   //     C |       M |       |  SCMemProj  M |
1794   //        \        | Raw   |     | Bot    / Bot
1795   //                  . . .
1796   //          (post write subtree elided)
1797   //                    . . .
1798   //             C \         M /
1799   //         MemBarVolatile (trailing)
1800   //
1801   // The G1 post-write subtree is also optional, this time when the
1802   // new value being written is either null or can be identified as a
1803   // newly allocated (young gen) object with no intervening control
1804   // flow. The latter cannot happen but the former may, in which case
1805   // the card mark membar is omitted and the memory feeds from the
  // leading membar and the StoreN/P are merged directly into the
1807   // trailing membar as per the normal subgraph. So, the only special
1808   // case which arises is when the post-write subgraph is generated.
1809   //
1810   // The kernel of the post-write G1 subgraph is the card mark itself
1811   // which includes a card mark memory barrier (MemBarVolatile), a
1812   // card test (LoadB), and a conditional update (If feeding a
1813   // StoreCM). These nodes are surrounded by a series of nested Ifs
1814   // which try to avoid doing the card mark. The top level If skips if
1815   // the object reference does not cross regions (i.e. it tests if
1816   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1817   // need not be recorded. The next If, which skips on a NULL value,
1818   // may be absent (it is not generated if the type of value is >=
1819   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1820   // checking if card_val != young).  n.b. although this test requires
1821   // a pre-read of the card it can safely be done before the StoreLoad
1822   // barrier. However that does not bypass the need to reread the card
1823   // after the barrier.
1824   //
1825   //                (pre-write subtree elided)
1826   //        . . .                  . . .    . . .  . . .
1827   //        C |               M |    M |    M |
1828   //       Region            Phi[M] StoreN    |
1829   //          |            Raw  |  oop |  Bot |
1830   //         / \_______         |\     |\     |\
1831   //      C / C \      . . .    | \    | \    | \
1832   //       If   CastP2X . . .   |  \   |  \   |  \
1833   //       / \                  |   \  |   \  |   \
1834   //      /   \                 |    \ |    \ |    \
1835   // IfFalse IfTrue             |      |      |     \
1836   //   |       |                 \     |     /       |
1837   //   |       If                 \    | \  /   \    |
1838   //   |      / \                  \   |   /     \   |
1839   //   |     /   \                  \  |  / \     |  |
1840   //   | IfFalse IfTrue           MergeMem   \    |  |
1841   //   |  . . .    / \                 |      \   |  |
1842   //   |          /   \                |       |  |  |
1843   //   |     IfFalse IfTrue            |       |  |  |
1844   //   |      . . .    |               |       |  |  |
1845   //   |               If             /        |  |  |
1846   //   |               / \           /         |  |  |
1847   //   |              /   \         /          |  |  |
1848   //   |         IfFalse IfTrue    /           |  |  |
1849   //   |           . . .   |      /            |  |  |
1850   //   |                    \    /             |  |  |
1851   //   |                     \  /              |  |  |
1852   //   |         MemBarVolatile__(card mark  ) |  |  |
1853   //   |              ||   C |     \           |  |  |
1854   //   |             LoadB   If     |         /   |  |
1855   //   |                    / \ Raw |        /   /  /
1856   //   |                   . . .    |       /   /  /
1857   //   |                        \   |      /   /  /
1858   //   |                        StoreCM   /   /  /
1859   //   |                           |     /   /  /
1860   //   |                            . . .   /  /
1861   //   |                                   /  /
1862   //   |   . . .                          /  /
1863   //   |    |             | /            /  /
1864   //   |    |           Phi[M] /        /  /
1865   //   |    |             |   /        /  /
1866   //   |    |             |  /        /  /
1867   //   |  Region  . . .  Phi[M]      /  /
1868   //   |    |             |         /  /
1869   //    \   |             |        /  /
1870   //     \  | . . .       |       /  /
1871   //      \ |             |      /  /
1872   //      Region         Phi[M] /  /
1873   //        |               \  /  /
1874   //         \             MergeMem
1875   //          \            /
1876   //          MemBarVolatile
1877   //
1878   // As with CMS + CondCardMark the first MergeMem merges the
1879   // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
1880   // slice from the Store into the card mark membar. However, in this
1881   // case it may also merge an AliasRawIdx mem slice from the pre
1882   // barrier write.
1883   //
1884   // The trailing MergeMem merges an AliasIdxBot Mem slice from the
1885   // leading membar with an oop slice from the StoreN and an
1886   // AliasRawIdx slice from the post barrier writes. In this case the
1887   // AliasIdxRaw Mem slice is merged through a series of Phi nodes
1888   // which combine feeds from the If regions in the post barrier
1889   // subgraph.
1890   //
1891   // So, for G1 the same characteristic subgraph arises as for CMS +
1892   // CondCardMark. There is a normal subgraph feeding the card mark
1893   // membar and a normal subgraph feeding the trailing membar.
1894   //
1895   // The CAS graph when using G1GC also includes an optional
1896   // post-write subgraph. It is very similar to the above graph except
1897   // for a few details.
1898   // 
  // - The control flow is gated by an additional If which tests the
1900   // result from the CompareAndSwapX node
1901   // 
1902   //  - The MergeMem which feeds the card mark membar only merges the
1903   // AliasIdxBot slice from the leading membar and the AliasIdxRaw
1904   // slice from the pre-barrier. It does not merge the SCMemProj
1905   // AliasIdxBot slice. So, this subgraph does not look like the
1906   // normal CAS subgraph.
1907   //
1908   // - The MergeMem which feeds the trailing membar merges the
1909   // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
1910   // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
1911   // has two AliasIdxBot input slices. However, this subgraph does
1912   // still look like the normal CAS subgraph.
1913   //
1914   // So, the upshot is:
1915   //
  // In all cases a volatile put graph will include a *normal*
  // volatile store subgraph between the leading membar and the
  // trailing membar. It may also include a normal volatile store
  // subgraph between the leading membar and the card mark membar.
1920   //
1921   // In all cases a CAS graph will contain a unique normal CAS graph
1922   // feeding the trailing membar.
1923   //
1924   // In all cases where there is a card mark membar (either as part of
1925   // a volatile object put or CAS) it will be fed by a MergeMem whose
1926   // AliasIdxBot slice feed will be a leading membar.
1927   //
1928   // The predicates controlling generation of instructions for store
1929   // and barrier nodes employ a few simple helper functions (described
1930   // below) which identify the presence or absence of all these
1931   // subgraph configurations and provide a means of traversing from
1932   // one node in the subgraph to another.
1933 
1934   // is_CAS(int opcode)
1935   //
1936   // return true if opcode is one of the possible CompareAndSwapX
1937   // values otherwise false.
1938 
1939   bool is_CAS(int opcode)
1940   {
1941     return (opcode == Op_CompareAndSwapI ||
1942             opcode == Op_CompareAndSwapL ||
1943             opcode == Op_CompareAndSwapN ||
1944             opcode == Op_CompareAndSwapP);
1945   }
1946 
1947   // leading_to_trailing
1948   //
  // graph traversal helper which detects the normal case Mem feed from
1950   // a release membar (or, optionally, its cpuorder child) to a
1951   // dependent volatile membar i.e. it ensures that one or other of
1952   // the following Mem flow subgraph is present.
1953   //
1954   //   MemBarRelease {leading}
1955   //   {MemBarCPUOrder} {optional}
1956   //     Bot |  \      . . .
1957   //         |  StoreN/P[mo_release]  . . .
1958   //         |   /
1959   //        MergeMem
1960   //         |
1961   //   MemBarVolatile {not card mark}
1962   //
1963   //   MemBarRelease {leading}
1964   //   {MemBarCPUOrder} {optional}
1965   //      |       \      . . .
1966   //      |     CompareAndSwapX  . . .
1967   //               |
1968   //     . . .    SCMemProj
1969   //           \   |
1970   //      |    MergeMem
1971   //      |       /
1972   //    MemBarCPUOrder
1973   //    MemBarAcquire {trailing}
1974   //
1975   // the predicate needs to be capable of distinguishing the following
  // volatile put graph which may arise when a GC post barrier
1977   // inserts a card mark membar
1978   //
1979   //   MemBarRelease {leading}
1980   //   {MemBarCPUOrder}__
1981   //     Bot |   \       \
1982   //         |   StoreN/P \
1983   //         |    / \     |
1984   //        MergeMem \    |
1985   //         |        \   |
1986   //   MemBarVolatile  \  |
1987   //    {card mark}     \ |
1988   //                  MergeMem
1989   //                      |
1990   // {not card mark} MemBarVolatile
1991   //
1992   // if the correct configuration is present returns the trailing
1993   // membar otherwise NULL.
1994   //
1995   // the input membar is expected to be either a cpuorder membar or a
1996   // release membar. in the latter case it should not have a cpu membar
1997   // child.
1998   //
1999   // the returned value may be a card mark or trailing membar
2000   //
2001 
2002   MemBarNode *leading_to_trailing(MemBarNode *leading)
2003   {
2004     assert((leading->Opcode() == Op_MemBarRelease ||
2005             leading->Opcode() == Op_MemBarCPUOrder),
2006            "expecting a volatile or cpuroder membar!");
2007 
2008     // check the mem flow
2009     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2010 
2011     if (!mem) {
2012       return NULL;
2013     }
2014 
2015     Node *x = NULL;
2016     StoreNode * st = NULL;
2017     LoadStoreNode *cas = NULL;
2018     MergeMemNode *mm = NULL;
2019     MergeMemNode *mm2 = NULL;
2020 
2021     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2022       x = mem->fast_out(i);
2023       if (x->is_MergeMem()) {
2024         if (mm != NULL) {
2025           if (mm2 != NULL) {
2026           // should not see more than 2 merge mems
2027             return NULL;
2028           } else {
2029             mm2 = x->as_MergeMem();
2030           }
2031         } else {
2032           mm = x->as_MergeMem();
2033         }
2034       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2035         // two releasing stores/CAS nodes is one too many
2036         if (st != NULL || cas != NULL) {
2037           return NULL;
2038         }
2039         st = x->as_Store();
2040       } else if (is_CAS(x->Opcode())) {
2041         if (st != NULL || cas != NULL) {
2042           return NULL;
2043         }
2044         cas = x->as_LoadStore();
2045       }
2046     }
2047 
2048     // must have a store or a cas
2049     if (!st && !cas) {
2050       return NULL;
2051     }
2052 
2053     // must have at least one merge if we also have st
2054     if (st && !mm) {
2055       return NULL;
2056     }
2057 
2058     if (cas) {
2059       Node *y = NULL;
2060       // look for an SCMemProj
2061       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
2062         x = cas->fast_out(i);
2063         if (x->is_Proj()) {
2064           y = x;
2065           break;
2066         }
2067       }
2068       if (y == NULL) {
2069         return NULL;
2070       }
2071       // the proj must feed a MergeMem
2072       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
2073         x = y->fast_out(i);
2074         if (x->is_MergeMem()) {
2075           mm = x->as_MergeMem();
2076           break;
2077         }
2078       }
2079       if (mm == NULL) {
2080         return NULL;
2081       }
2082       MemBarNode *mbar = NULL;
2083       // ensure the merge feeds a trailing membar cpuorder + acquire pair
2084       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2085         x = mm->fast_out(i);
2086         if (x->is_MemBar()) {
2087           int opcode = x->Opcode();
2088           if (opcode == Op_MemBarCPUOrder) {
2089             MemBarNode *z =  x->as_MemBar();
2090             z = child_membar(z);
2091             if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
2092               mbar = z;
2093             }
2094           }
2095           break;
2096         }
2097       }
2098       return mbar;
2099     } else {
2100       Node *y = NULL;
2101       // ensure the store feeds the first mergemem;
2102       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2103         if (st->fast_out(i) == mm) {
2104           y = st;
2105           break;
2106         }
2107       }
2108       if (y == NULL) {
2109         return NULL;
2110       }
2111       if (mm2 != NULL) {
2112         // ensure the store feeds the second mergemem;
2113         y = NULL;
2114         for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2115           if (st->fast_out(i) == mm2) {
2116             y = st;
2117           }
2118         }
2119         if (y == NULL) {
2120           return NULL;
2121         }
2122       }
2123 
2124       MemBarNode *mbar = NULL;
2125       // ensure the first mergemem feeds a volatile membar
2126       for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2127         x = mm->fast_out(i);
2128         if (x->is_MemBar()) {
2129           int opcode = x->Opcode();
2130           if (opcode == Op_MemBarVolatile) {
2131             mbar = x->as_MemBar();
2132           }
2133           break;
2134         }
2135       }
2136       if (mm2 == NULL) {
2137         // this is our only option for a trailing membar
2138         return mbar;
2139       }
2140       // ensure the second mergemem feeds a volatile membar
2141       MemBarNode *mbar2 = NULL;
2142       for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
2143         x = mm2->fast_out(i);
2144         if (x->is_MemBar()) {
2145           int opcode = x->Opcode();
2146           if (opcode == Op_MemBarVolatile) {
2147             mbar2 = x->as_MemBar();
2148           }
2149           break;
2150         }
2151       }
2152       // if we have two merge mems we must have two volatile membars
2153       if (mbar == NULL || mbar2 == NULL) {
2154         return NULL;
2155       }
2156       // return the trailing membar
2157       if (is_card_mark_membar(mbar2)) {
2158         return mbar;
2159       } else {
2160         if (is_card_mark_membar(mbar)) {
2161           return mbar2;
2162         } else {
2163           return NULL;
2164         }
2165       }
2166     }
2167   }
2168 
2169   // trailing_to_leading
2170   //
2171   // graph traversal helper which detects the normal case Mem feed
2172   // from a trailing membar to a preceding release membar (optionally
2173   // its cpuorder child) i.e. it ensures that one or other of the
2174   // following Mem flow subgraphs is present.
2175   //
2176   //   MemBarRelease {leading}
2177   //   MemBarCPUOrder {optional}
2178   //    | Bot |  \      . . .
2179   //    |     |  StoreN/P[mo_release]  . . .
2180   //    |     |   /
2181   //    |    MergeMem
2182   //    |     |
2183   //   MemBarVolatile {not card mark}
2184   //
2185   //   MemBarRelease {leading}
2186   //   MemBarCPUOrder {optional}
2187   //      |       \      . . .
2188   //      |     CompareAndSwapX  . . .
2189   //               |
2190   //     . . .    SCMemProj
2191   //           \   |
2192   //      |    MergeMem
2193   //      |       |
2194   //    MemBarCPUOrder
2195   //    MemBarAcquire {trailing}
2196   //
2197   // this predicate checks for the same flow as the previous predicate
2198   // but starting from the bottom rather than the top.
2199   //
  // if the configuration is present returns the cpuorder membar for
2201   // preference or when absent the release membar otherwise NULL.
2202   //
2203   // n.b. the input membar is expected to be a MemBarVolatile or
2204   // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
2205   // mark membar.
2206 
2207   MemBarNode *trailing_to_leading(const MemBarNode *barrier)
2208   {
2209     // input must be a volatile membar
2210     assert((barrier->Opcode() == Op_MemBarVolatile ||
2211             barrier->Opcode() == Op_MemBarAcquire),
2212            "expecting a volatile or an acquire membar");
2213 
2214     assert((barrier->Opcode() != Op_MemBarVolatile) ||
2215            !is_card_mark_membar(barrier),
2216            "not expecting a card mark membar");
2217     Node *x;
2218     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2219 
2220     // if we have an acquire membar then it must be fed via a CPUOrder
2221     // membar
2222 
2223     if (is_cas) {
2224       // skip to parent barrier which must be a cpuorder
2225       x = parent_membar(barrier);
2226       if (x->Opcode() != Op_MemBarCPUOrder)
2227         return NULL;
2228     } else {
2229       // start from the supplied barrier
2230       x = (Node *)barrier;
2231     }
2232 
2233     // the Mem feed to the membar should be a merge
2234     x = x ->in(TypeFunc::Memory);
2235     if (!x->is_MergeMem())
2236       return NULL;
2237 
2238     MergeMemNode *mm = x->as_MergeMem();
2239 
2240     if (is_cas) {
2241       // the merge should be fed from the CAS via an SCMemProj node
2242       x = NULL;
2243       for (uint idx = 1; idx < mm->req(); idx++) {
2244         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2245           x = mm->in(idx);
2246           break;
2247         }
2248       }
2249       if (x == NULL) {
2250         return NULL;
2251       }
2252       // check for a CAS feeding this proj
2253       x = x->in(0);
2254       int opcode = x->Opcode();
2255       if (!is_CAS(opcode)) {
2256         return NULL;
2257       }
2258       // the CAS should get its mem feed from the leading membar
2259       x = x->in(MemNode::Memory);
2260     } else {
2261       // the merge should get its Bottom mem feed from the leading membar
2262       x = mm->in(Compile::AliasIdxBot);
2263     }
2264 
2265     // ensure this is a non control projection
2266     if (!x->is_Proj() || x->is_CFG()) {
2267       return NULL;
2268     }
2269     // if it is fed by a membar that's the one we want
2270     x = x->in(0);
2271 
2272     if (!x->is_MemBar()) {
2273       return NULL;
2274     }
2275 
2276     MemBarNode *leading = x->as_MemBar();
2277     // reject invalid candidates
2278     if (!leading_membar(leading)) {
2279       return NULL;
2280     }
2281 
2282     // ok, we have a leading membar, now for the sanity clauses
2283 
2284     // the leading membar must feed Mem to a releasing store or CAS
2285     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2286     StoreNode *st = NULL;
2287     LoadStoreNode *cas = NULL;
2288     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2289       x = mem->fast_out(i);
2290       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2291         // two stores or CASes is one too many
2292         if (st != NULL || cas != NULL) {
2293           return NULL;
2294         }
2295         st = x->as_Store();
2296       } else if (is_CAS(x->Opcode())) {
2297         if (st != NULL || cas != NULL) {
2298           return NULL;
2299         }
2300         cas = x->as_LoadStore();
2301       }
2302     }
2303 
2304     // we should not have both a store and a cas
2305     if (st == NULL & cas == NULL) {
2306       return NULL;
2307     }
2308 
2309     if (st == NULL) {
2310       // nothing more to check
2311       return leading;
2312     } else {
2313       // we should not have a store if we started from an acquire
2314       if (is_cas) {
2315         return NULL;
2316       }
2317 
2318       // the store should feed the merge we used to get here
2319       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2320         if (st->fast_out(i) == mm) {
2321           return leading;
2322         }
2323       }
2324     }
2325 
2326     return NULL;
2327   }
2328 
2329   // card_mark_to_leading
2330   //
2331   // graph traversal helper which traverses from a card mark volatile
2332   // membar to a leading membar i.e. it ensures that the following Mem
2333   // flow subgraph is present.
2334   //
2335   //    MemBarRelease {leading}
2336   //   {MemBarCPUOrder} {optional}
2337   //         |   . . .
2338   //     Bot |   /
2339   //      MergeMem
2340   //         |
2341   //     MemBarVolatile (card mark)
2342   //        |     \
2343   //      . . .   StoreCM
2344   //
  // if the configuration is present returns the cpuorder membar for
  // preference or when absent the release membar otherwise NULL.
  //
  // n.b. the input membar is expected to be a MemBarVolatile and must
  // be a card mark membar.
2350 
2351   MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
2352   {
2353     // input must be a card mark volatile membar
2354     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2355 
2356     // the Mem feed to the membar should be a merge
2357     Node *x = barrier->in(TypeFunc::Memory);
2358     if (!x->is_MergeMem()) {
2359       return NULL;
2360     }
2361 
2362     MergeMemNode *mm = x->as_MergeMem();
2363 
2364     x = mm->in(Compile::AliasIdxBot);
2365 
2366     if (!x->is_MemBar()) {
2367       return NULL;
2368     }
2369 
2370     MemBarNode *leading = x->as_MemBar();
2371 
2372     if (leading_membar(leading)) {
2373       return leading;
2374     }
2375 
2376     return NULL;
2377   }
2378 
// unnecessary_acquire
//
// predicate controlling translation of a MemBarAcquire. returns true
// when the barrier belongs to one of the recognized volatile-load or
// CAS subgraphs checked below and so can be elided; returns false
// when a dmb needs to be planted.
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      // look through the DecodeN at the underlying load
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      // (i.e. the load is fed by both the Ctl and Mem projections)
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for an unnecessary membar is that it is a trailing
  // node belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2486 
2487 bool needs_acquiring_load(const Node *n)
2488 {
2489   assert(n->is_Load(), "expecting a load");
2490   if (UseBarriersForVolatile) {
2491     // we use a normal load and a dmb
2492     return false;
2493   }
2494 
2495   LoadNode *ld = n->as_Load();
2496 
2497   if (!ld->is_acquire()) {
2498     return false;
2499   }
2500 
2501   // check if this load is feeding an acquire membar
2502   //
2503   //   LoadX[mo_acquire]
2504   //   {  |1   }
2505   //   {DecodeN}
2506   //      |Parms
2507   //   MemBarAcquire*
2508   //
2509   // where * tags node we were passed
2510   // and |k means input k
2511 
2512   Node *start = ld;
2513   Node *mbacq = NULL;
2514 
2515   // if we hit a DecodeNarrowPtr we reset the start node and restart
2516   // the search through the outputs
2517  restart:
2518 
2519   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2520     Node *x = start->fast_out(i);
2521     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2522       mbacq = x;
2523     } else if (!mbacq &&
2524                (x->is_DecodeNarrowPtr() ||
2525                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2526       start = x;
2527       goto restart;
2528     }
2529   }
2530 
2531   if (mbacq) {
2532     return true;
2533   }
2534 
2535   // now check for an unsafe volatile get
2536 
2537   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2538   //
2539   //     MemBarCPUOrder
2540   //        ||       \\
2541   //   MemBarAcquire* LoadX[mo_acquire]
2542   //        ||
2543   //   MemBarCPUOrder
2544 
2545   MemBarNode *membar;
2546 
2547   membar = parent_membar(ld);
2548 
2549   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2550     return false;
2551   }
2552 
2553   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2554 
2555   membar = child_membar(membar);
2556 
2557   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2558     return false;
2559   }
2560 
2561   membar = child_membar(membar);
2562 
2563   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2564     return false;
2565   }
2566 
2567   return true;
2568 }
2569 
2570 bool unnecessary_release(const Node *n)
2571 {
2572   assert((n->is_MemBar() &&
2573           n->Opcode() == Op_MemBarRelease),
2574          "expecting a release membar");
2575 
2576   if (UseBarriersForVolatile) {
2577     // we need to plant a dmb
2578     return false;
2579   }
2580 
2581   // if there is a dependent CPUOrder barrier then use that as the
2582   // leading
2583 
2584   MemBarNode *barrier = n->as_MemBar();
2585   // check for an intervening cpuorder membar
2586   MemBarNode *b = child_membar(barrier);
2587   if (b && b->Opcode() == Op_MemBarCPUOrder) {
2588     // ok, so start the check from the dependent cpuorder barrier
2589     barrier = b;
2590   }
2591 
2592   // must start with a normal feed
2593   MemBarNode *trailing = leading_to_trailing(barrier);
2594 
2595   return (trailing != NULL);
2596 }
2597 
2598 bool unnecessary_volatile(const Node *n)
2599 {
2600   // assert n->is_MemBar();
2601   if (UseBarriersForVolatile) {
2602     // we need to plant a dmb
2603     return false;
2604   }
2605 
2606   MemBarNode *mbvol = n->as_MemBar();
2607 
2608   // first we check if this is part of a card mark. if so then we have
2609   // to generate a StoreLoad barrier
2610 
2611   if (is_card_mark_membar(mbvol)) {
2612       return false;
2613   }
2614 
2615   // ok, if it's not a card mark then we still need to check if it is
2616   // a trailing membar of a volatile put graph.
2617 
2618   return (trailing_to_leading(mbvol) != NULL);
2619 }
2620 
2621 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2622 
2623 bool needs_releasing_store(const Node *n)
2624 {
2625   // assert n->is_Store();
2626   if (UseBarriersForVolatile) {
2627     // we use a normal store and dmb combination
2628     return false;
2629   }
2630 
2631   StoreNode *st = n->as_Store();
2632 
2633   // the store must be marked as releasing
2634   if (!st->is_release()) {
2635     return false;
2636   }
2637 
2638   // the store must be fed by a membar
2639 
2640   Node *x = st->lookup(StoreNode::Memory);
2641 
2642   if (! x || !x->is_Proj()) {
2643     return false;
2644   }
2645 
2646   ProjNode *proj = x->as_Proj();
2647 
2648   x = proj->lookup(0);
2649 
2650   if (!x || !x->is_MemBar()) {
2651     return false;
2652   }
2653 
2654   MemBarNode *barrier = x->as_MemBar();
2655 
2656   // if the barrier is a release membar or a cpuorder mmebar fed by a
2657   // release membar then we need to check whether that forms part of a
2658   // volatile put graph.
2659 
2660   // reject invalid candidates
2661   if (!leading_membar(barrier)) {
2662     return false;
2663   }
2664 
2665   // does this lead a normal subgraph?
2666   MemBarNode *trailing = leading_to_trailing(barrier);
2667 
2668   return (trailing != NULL);
2669 }
2670 
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false

bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    // with barriers the CAS uses a plain load and separate dmbs
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  // debug builds verify that the CAS really sits inside the expected
  // leading cpuorder+release / trailing acquire membar graph; release
  // builds rely on that invariant and just return true below
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_trailing(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2719 
2720 // predicate controlling translation of StoreCM
2721 //
2722 // returns true if a StoreStore must precede the card write otherwise
2723 // false
2724 
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking. Any other occurrence will happen when
  // performing a card mark using CMS with conditional card marking or
  // G1. In those cases the preceding MemBarVolatile will be
  // translated to a dmb ish which guarantees visibility of the
  // preceding StoreN/P before this StoreCM

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then we must
  // insert the dmb ishst

  if (UseBarriersForVolatile) {
    return false;
  }

  // we must be using CMS without conditional card marking so we have
  // to generate the StoreStore

  return false;
}
2753 
2754 
2755 #define __ _masm.
2756 
2757 // advance declarations for helper functions to convert register
2758 // indices to register objects
2759 
2760 // the ad file has to provide implementations of certain methods
2761 // expected by the generic code
2762 //
2763 // REQUIRED FUNCTIONALITY
2764 
2765 //=============================================================================
2766 
// !!!!! Special hack to get all types of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.

// byte offset from the start of a static Java call to its return address
int MachCallStaticJavaNode::ret_addr_offset()
{
  // call should be a simple bl
  int off = 4;
  return off;
}

// byte offset from the start of a dynamic (inline cache) Java call to
// its return address
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
2782 
2783 int MachCallRuntimeNode::ret_addr_offset() {
2784   // for generated stubs the call will be
2785   //   far_call(addr)
2786   // for real runtime callouts it will be six instructions
2787   // see aarch64_enc_java_to_runtime
2788   //   adr(rscratch2, retaddr)
2789   //   lea(rscratch1, RuntimeAddress(addr)
2790   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2791   //   blrt rscratch1
2792   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2793   if (cb) {
2794     return MacroAssembler::far_branch_size();
2795   } else {
2796     return 6 * NativeInstruction::instruction_size;
2797   }
2798 }
2799 
// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
2813 
2814 //=============================================================================
2815 
#ifndef PRODUCT
// print a textual form of the breakpoint pseudo-instruction
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// emit a brk #0, trapping into the debugger / signal handler
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // size is derived generically from the emitted instructions
  return MachNode::size(ra_);
}
2830 
2831 //=============================================================================
2832 
#ifndef PRODUCT
  // print the nop padding sequence
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as alignment padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // each nop is one fixed-width instruction
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2849 
2850 //=============================================================================
// the constant table base is not materialized into a register here
// (the table is addressed absolutely -- see
// calculate_table_base_offset below), so the node produces no output
// register and an empty encoding
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never called because requires_postalloc_expand() returns false
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // empty encoding => zero size
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
2875 
#ifndef PRODUCT
// print the prolog instruction sequence; mirrors the frame layout
// produced by MachPrologNode::emit / build_frame
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames are allocated with an immediate-offset sub; larger
  // frames need the offset materialized in a scratch register
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
2897 
// emit the method prolog: patchable nop, optional stack bang, frame
// build, plus (simulator builds) a method-entry notification
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  // the prolog contains no relocatable values
  return 0;
}
2944 
2945 //=============================================================================
2946 
#ifndef PRODUCT
// print the epilog instruction sequence; mirrors the teardown
// performed by MachEpilogNode::emit / remove_frame
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  // frame pop shape depends on whether the offset fits an immediate
  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif

// emit the method epilog: tear down the frame, notify the simulator
// (simulator builds) and read the safepoint polling page
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3010 
3011 //=============================================================================
3012 
3013 // Figure out which register class each belongs in: rc_int, rc_float or
3014 // rc_stack.
3015 enum RC { rc_bad, rc_int, rc_float, rc_stack };
3016 
3017 static enum RC rc_class(OptoReg::Name reg) {
3018 
3019   if (reg == OptoReg::Bad) {
3020     return rc_bad;
3021   }
3022 
3023   // we have 30 int registers * 2 halves
3024   // (rscratch1 and rscratch2 are omitted)
3025 
3026   if (reg < 60) {
3027     return rc_int;
3028   }
3029 
3030   // we have 32 float register * 2 halves
3031   if (reg < 60 + 128) {
3032     return rc_float;
3033   }
3034 
3035   // Between float regs & stack is the flags regs.
3036   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
3037 
3038   return rc_stack;
3039 }
3040 
3041 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3042   Compile* C = ra_->C;
3043 
3044   // Get registers to move.
3045   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3046   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3047   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3048   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3049 
3050   enum RC src_hi_rc = rc_class(src_hi);
3051   enum RC src_lo_rc = rc_class(src_lo);
3052   enum RC dst_hi_rc = rc_class(dst_hi);
3053   enum RC dst_lo_rc = rc_class(dst_lo);
3054 
3055   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3056 
3057   if (src_hi != OptoReg::Bad) {
3058     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3059            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3060            "expected aligned-adjacent pairs");
3061   }
3062 
3063   if (src_lo == dst_lo && src_hi == dst_hi) {
3064     return 0;            // Self copy, no move.
3065   }
3066 
3067   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3068               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3069   int src_offset = ra_->reg2offset(src_lo);
3070   int dst_offset = ra_->reg2offset(dst_lo);
3071 
3072   if (bottom_type()->isa_vect() != NULL) {
3073     uint ireg = ideal_reg();
3074     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3075     if (cbuf) {
3076       MacroAssembler _masm(cbuf);
3077       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3078       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3079         // stack->stack
3080         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3081         if (ireg == Op_VecD) {
3082           __ unspill(rscratch1, true, src_offset);
3083           __ spill(rscratch1, true, dst_offset);
3084         } else {
3085           __ spill_copy128(src_offset, dst_offset);
3086         }
3087       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3088         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3089                ireg == Op_VecD ? __ T8B : __ T16B,
3090                as_FloatRegister(Matcher::_regEncode[src_lo]));
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3092         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3093                        ireg == Op_VecD ? __ D : __ Q,
3094                        ra_->reg2offset(dst_lo));
3095       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3096         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3097                        ireg == Op_VecD ? __ D : __ Q,
3098                        ra_->reg2offset(src_lo));
3099       } else {
3100         ShouldNotReachHere();
3101       }
3102     }
3103   } else if (cbuf) {
3104     MacroAssembler _masm(cbuf);
3105     switch (src_lo_rc) {
3106     case rc_int:
3107       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3108         if (is64) {
3109             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3110                    as_Register(Matcher::_regEncode[src_lo]));
3111         } else {
3112             MacroAssembler _masm(cbuf);
3113             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3114                     as_Register(Matcher::_regEncode[src_lo]));
3115         }
3116       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3117         if (is64) {
3118             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3119                      as_Register(Matcher::_regEncode[src_lo]));
3120         } else {
3121             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3122                      as_Register(Matcher::_regEncode[src_lo]));
3123         }
3124       } else {                    // gpr --> stack spill
3125         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3126         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3127       }
3128       break;
3129     case rc_float:
3130       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3131         if (is64) {
3132             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3133                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3134         } else {
3135             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3136                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3137         }
3138       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3139           if (cbuf) {
3140             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3141                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3142         } else {
3143             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3144                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3145         }
3146       } else {                    // fpr --> stack spill
3147         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3148         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3149                  is64 ? __ D : __ S, dst_offset);
3150       }
3151       break;
3152     case rc_stack:
3153       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3154         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3155       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3156         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3157                    is64 ? __ D : __ S, src_offset);
3158       } else {                    // stack --> stack copy
3159         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3160         __ unspill(rscratch1, is64, src_offset);
3161         __ spill(rscratch1, is64, dst_offset);
3162       }
3163       break;
3164     default:
3165       assert(false, "bad rc_class for spill");
3166       ShouldNotReachHere();
3167     }
3168   }
3169 
3170   if (st) {
3171     st->print("spill ");
3172     if (src_lo_rc == rc_stack) {
3173       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3174     } else {
3175       st->print("%s -> ", Matcher::regName[src_lo]);
3176     }
3177     if (dst_lo_rc == rc_stack) {
3178       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3179     } else {
3180       st->print("%s", Matcher::regName[dst_lo]);
3181     }
3182     if (bottom_type()->isa_vect() != NULL) {
3183       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3184     } else {
3185       st->print("\t# spill size = %d", is64 ? 64:32);
3186     }
3187   }
3188 
3189   return 0;
3190 
3191 }
3192 
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // before register allocation there are no assigned locations to print
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    // format by running implementation in print-only mode (no cbuf)
    implementation(NULL, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // variable size; computed generically from the emitted instructions
  return MachNode::size(ra_);
}
3209 
3210 //=============================================================================
3211 
3212 #ifndef PRODUCT
3213 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3214   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3215   int reg = ra_->get_reg_first(this);
3216   st->print("add %s, rsp, #%d]\t# box lock",
3217             Matcher::regName[reg], offset);
3218 }
3219 #endif
3220 
// materialize the stack address of the box lock slot into the
// register assigned to this node
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // frame offsets are expected to fit the add/sub immediate range
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}
3238 
3239 //=============================================================================
3240 
#ifndef PRODUCT
// print the unverified entry point (inline cache check) sequence
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("# MachUEPNode");
  if (UseCompressedClassPointers) {
    st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_klass_shift() != 0) {
      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    }
  } else {
    // NOTE(review): this is the uncompressed-klass branch yet the
    // printed annotation still reads "# compressed klass" -- confirm
    // whether the text is intentional
   st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
  }
  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif

// emit the unverified entry point: compare the receiver klass against
// the inline cache and jump to the miss stub on mismatch
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // variable size; computed generically from the emitted instructions
  return MachNode::size(ra_);
}
3276 
3277 // REQUIRED EMIT CODE
3278 
3279 //=============================================================================
3280 
// Emit exception handler code.
// returns the offset of the handler within the stub section, or 0 on
// failure (code cache full)
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}

// Emit deopt handler code.
// returns the offset of the handler within the stub section, or 0 on
// failure (code cache full)
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // set lr to the current pc before jumping to the unpack blob
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3321 
3322 // REQUIRED MATCHER CODE
3323 
3324 //=============================================================================
3325 
3326 const bool Matcher::match_rule_supported(int opcode) {
3327 
3328   // TODO
3329   // identify extra cases that we might want to provide match rules for
3330   // e.g. Op_StrEquals and other intrinsics
3331   if (!has_match_rule(opcode)) {
3332     return false;
3333   }
3334 
3335   return true;  // Per default match rules are supported.
3336 }
3337 
3338 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3339 
3340   // TODO
3341   // identify extra cases that we might want to provide match rules for
3342   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3343   bool ret_value = match_rule_supported(opcode);
3344   // Add rules here.
3345 
3346   return ret_value;  // Per default match rules are supported.
3347 }
3348 
// no support for predicated (masked) vector operations
const bool Matcher::has_predicated_vectors(void) {
  return false;
}

// no adjustment to the default float register pressure threshold
const int Matcher::float_pressure(int default_pressure_threshold) {
  return default_pressure_threshold;
}

// unimplemented on aarch64 (only meaningful for stack-based FPUs)
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
3362 
3363 // Is this branch offset short enough that a short branch can be used?
3364 //
3365 // NOTE: If the platform does not provide any short branch variants, then
3366 //       this method should return false for offset 0.
3367 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3368   // The passed offset is relative to address of the branch.
3369 
3370   return (-32768 <= offset && offset < 32768);
3371 }
3372 
// all 64-bit constants are treated as cheap to materialize
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
3383 
3384 // Vector width in bytes.
3385 const int Matcher::vector_width_in_bytes(BasicType bt) {
3386   int size = MIN2(16,(int)MaxVectorSize);
3387   // Minimum 2 values in vector
3388   if (size < 2*type2aelembytes(bt)) size = 0;
3389   // But never < 4
3390   if (size < 4) size = 0;
3391   return size;
3392 }
3393 
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
// minimum vector size (in elements) for the given element type
const int Matcher::min_vector_size(const BasicType bt) {
//  For the moment limit the vector size to 8 bytes
    int size = 8 / type2aelembytes(bt);
    if (size < 2) size = 2;
    return size;
}
3404 
// Vector ideal reg.
// len is the vector width in bytes; only 8 (VecD) and 16 (VecX) byte
// vectors are supported
const int Matcher::vector_ideal_reg(int len) {
  switch(len) {
    case  8: return Op_VecD;
    case 16: return Op_VecX;
  }
  ShouldNotReachHere();
  return 0;
}

// vector shift counts are always held in a full 128-bit register
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
3418 
3419 // AES support not yet implemented
3420 const bool Matcher::pass_original_key_for_aes() {
3421   return false;
3422 }
3423 
3424 // x86 supports misaligned vectors store/load.
3425 const bool Matcher::misaligned_vectors_ok() {
3426   return !AlignVector; // can be changed by flag
3427 }
3428 
// false => size gets scaled to BytesPerLong, ok.
// (ClearArray counts are in long words, not bytes.)
const bool Matcher::init_array_count_is_in_bytes = false;
3431 
// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  // (csel handles 32- and 64-bit operands identically), so no extra cost.
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  // (fcsel), so no extra cost.
  return 0;
}
3442 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
// (AArch64 included: shifts are matched into the address operand by
// the ADL rules instead).
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// AArch64 shift instructions use only the low bits, so no masking.
const bool Matcher::need_masked_shift_count = false;
3454 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex narrow-oop addressing is only profitable when the heap
  // base requires no shift.
  return Universe::narrow_oop_shift() == 0;
}
3468 
bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
// (conservatively disabled for now; narrow-klass decodes are matched
// into a register instead of folded into addresses).
  return false;
}
3474 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// AArch64 handles misaligned doubles, so no splitting is required.
const bool Matcher::misaligned_doubles_ok = true;
3487 
// Platform hook for fixing up an implicit-null-check instruction.
// Not expected to be reached on AArch64 — note this is Unimplemented()
// (a hard stop), not a no-op; the old "No-op on amd64" comment was stale.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3492 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
// AArch64 floating point is already IEEE-754 strict; no rounding fixups.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
3496 
// Are floats converted to double when stored to stack during
// deoptimization?
// Yes: deopt reads stack floats as doubles on this port.
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3506 
3507 // Return whether or not this register is ever used as an argument.
3508 // This function is used on startup to build the trampoline stubs in
3509 // generateOptoStub.  Registers not mentioned will be killed by the VM
3510 // call in the trampoline, and arguments in those registers not be
3511 // available to the callee.
3512 bool Matcher::can_be_java_arg(int reg)
3513 {
3514   return
3515     reg ==  R0_num || reg == R0_H_num ||
3516     reg ==  R1_num || reg == R1_H_num ||
3517     reg ==  R2_num || reg == R2_H_num ||
3518     reg ==  R3_num || reg == R3_H_num ||
3519     reg ==  R4_num || reg == R4_H_num ||
3520     reg ==  R5_num || reg == R5_H_num ||
3521     reg ==  R6_num || reg == R6_H_num ||
3522     reg ==  R7_num || reg == R7_H_num ||
3523     reg ==  V0_num || reg == V0_H_num ||
3524     reg ==  V1_num || reg == V1_H_num ||
3525     reg ==  V2_num || reg == V2_H_num ||
3526     reg ==  V3_num || reg == V3_H_num ||
3527     reg ==  V4_num || reg == V4_H_num ||
3528     reg ==  V5_num || reg == V5_H_num ||
3529     reg ==  V6_num || reg == V6_H_num ||
3530     reg ==  V7_num || reg == V7_H_num;
3531 }
3532 
// An argument register may always be spilled; same set as the Java
// argument registers.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Should C2 open-code long division by this constant instead of
// calling the runtime?  Never on AArch64.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
3541 
// Register for DIVI projection of divmodI.
// AArch64 has no combined div/mod instruction, so none of these
// projection masks should ever be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3564 
// Registers that must be saved around a MethodHandle invoke: the
// frame pointer only.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3568 
3569 // helper for encoding java_to_runtime calls on sim
3570 //
3571 // this is needed to compute the extra arguments required when
3572 // planting a call to the simulator blrt instruction. the TypeFunc
3573 // can be queried to identify the counts for integral, and floating
3574 // arguments and the return type
3575 
3576 static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
3577 {
3578   int gps = 0;
3579   int fps = 0;
3580   const TypeTuple *domain = tf->domain();
3581   int max = domain->cnt();
3582   for (int i = TypeFunc::Parms; i < max; i++) {
3583     const Type *t = domain->field_at(i);
3584     switch(t->basic_type()) {
3585     case T_FLOAT:
3586     case T_DOUBLE:
3587       fps++;
3588     default:
3589       gps++;
3590     }
3591   }
3592   gpcnt = gps;
3593   fpcnt = fps;
3594   BasicType rt = tf->return_type();
3595   switch (rt) {
3596   case T_VOID:
3597     rtype = MacroAssembler::ret_type_void;
3598     break;
3599   default:
3600     rtype = MacroAssembler::ret_type_integral;
3601     break;
3602   case T_FLOAT:
3603     rtype = MacroAssembler::ret_type_float;
3604     break;
3605   case T_DOUBLE:
3606     rtype = MacroAssembler::ret_type_double;
3607     break;
3608   }
3609 }
3610 
// Emit a volatile (acquire/release) memory access INSN on register REG.
// Volatile accesses support only a plain base-register address, so the
// index/scale/disp components of the operand must be absent/zero — the
// guarantees enforce that.  (SCRATCH is currently unused.)
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
3619 
// Member-function-pointer types for the MacroAssembler load/store
// emitters passed into the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3624 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      // I2L memory operands carry a 32-bit index that must be
      // sign-extended (sxtw); all other forms use a plain shift.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Base + immediate displacement, no index register.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        // Base + (extended/shifted) index.
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Base + disp + index: fold base+disp into rscratch1 first,
        // since AArch64 has no [base + index + disp] addressing mode.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3661 
  // Float/double variant of loadStore above; same address selection.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // NOTE(review): unlike the integer variant, the INDINDEXOFFSETI2L
    // and INDINDEXOFFSETI2LN opcodes are NOT listed here and would get
    // lsl instead of sxtw — confirm those operands can never match a
    // float/double access before relying on this.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Fold base+disp into rscratch1; no [base + index + disp] mode.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3690 
  // Vector variant of loadStore: only base+disp or base+index(lsl)
  // addressing is supported for SIMD accesses.
  static void loadStore(MacroAssembler masm, mem_vector_insn insn,
                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
                         int opcode, Register base, int index, int size, int disp)
  {
    if (index == -1) {
      (masm.*insn)(reg, T, Address(base, disp));
    } else {
      assert(disp == 0, "unsupported address mode");
      (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
    }
  }
3702 
3703 %}
3704 
3705 
3706 
3707 //----------ENCODING BLOCK-----------------------------------------------------
3708 // This block specifies the encoding classes used by the compiler to
3709 // output byte streams.  Encoding classes are parameterized macros
3710 // used by Machine Instruction Nodes in order to generate the bit
3711 // encoding of the instruction.  Operands specify their base encoding
3712 // interface with the interface keyword.  There are currently
3713 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3714 // COND_INTER.  REG_INTER causes an operand to generate a function
3715 // which returns its register number when queried.  CONST_INTER causes
3716 // an operand to generate a function which returns the value of the
3717 // constant when queried.  MEMORY_INTER causes an operand to generate
3718 // four functions which return the Base Register, the Index Register,
3719 // the Scale Value, and the Offset Value of the operand when queried.
3720 // COND_INTER causes an operand to generate six functions which return
3721 // the encoding code (ie - encoding bits for the instruction)
3722 // associated with each basic boolean condition for a conditional
3723 // instruction.
3724 //
3725 // Instructions specify two basic values for encoding.  Again, a
3726 // function is available to check if the constant displacement is an
3727 // oop. They use the ins_encode keyword to specify their encoding
3728 // classes (which must be a sequence of enc_class names, and their
3729 // parameters, specified in the encoding block), and they use the
3730 // opcode keyword to specify, in order, their primary, secondary, and
3731 // tertiary opcode.  Only the opcode sections which a particular
3732 // instruction needs for encoding need to be specified.
3733 encode %{
3734   // Build emit functions for each basic byte or larger field in the
3735   // intel encoding scheme (opcode, rm, sib, immediate), and call them
3736   // from C++ code in the enc_class source block.  Emit functions will
3737   // live in the main source block for now.  In future, we can
3738   // generalize this by adding a syntax that specifies the sizes of
3739   // fields in an order, so that the adlc can build the emit functions
3740   // automagically
3741 
  // catch all for unimplemented encodings
  // (emits a stop so a missing encoding fails loudly at run time)
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3747 
  // BEGIN Non-volatile memory access

  // Each load encoding below hands off to loadStore(), which decodes
  // the $mem operand (base/index/scale/disp) and picks the addressing
  // form; $mem->opcode() tells it whether the index register needs
  // sign extension.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar float/double loads.
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: S = 4 bytes, D = 8 bytes, Q = 16 bytes.
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3851 
  // Store encodings; the *0 variants store the zero register (zr)
  // directly instead of consuming a source register.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    // StoreStore barrier before the zero-byte store gives release-like
    // ordering for this store.
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp is not encodable as the data operand of str), so copy it
    // through rscratch2 first.
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar float/double stores.
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: S = 4 bytes, D = 8 bytes, Q = 16 bytes.
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
3946 
  // volatile loads and stores
  //
  // These use the acquire/release instructions (ldar*/stlr*) via
  // MOV_VOLATILE, which only supports a plain base-register address.
  // Sub-word signed loads need an explicit sign-extend afterwards
  // because there is no sign-extending ldar form.

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Volatile float/double loads go through an integer scratch register
  // (ldar into rscratch1, then fmov into the FP register), since the
  // acquire loads only target general-purpose registers.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (sp cannot be the data operand), so copy through rscratch2.
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Volatile float/double stores mirror the loads: fmov the FP value
  // into rscratch2, then stlr(w) it.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4073 
  // synchronized read/update encodings

  // Load-acquire-exclusive.  ldaxr only takes a bare base register, so
  // any index/scale/disp components are folded into rscratch1 first.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + scaled index needs two lea steps.
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4104 
  // Store-release-exclusive; address handling mirrors ldaxr above.
  // The exclusive-store status lands in rscratch1 and the trailing
  // cmpw sets flags so the instruct rule can branch on success.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    __ cmpw(rscratch1, zr);
  %}
4134 
  // Compare-and-swap encodings; the memory operand must be a bare base
  // register (no index/displacement), enforced by the guarantees.
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true);
  %}


  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
  // CompareAndSwap sequence to serve as a barrier on acquiring a
  // lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true);
  %}
4167 
4168 
4169   // auxiliary used for CompareAndSwapX to set result register
4170   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4171     MacroAssembler _masm(&cbuf);
4172     Register res_reg = as_Register($res$$reg);
4173     __ cset(res_reg, Assembler::EQ);
4174   %}
4175 
  // prefetch encodings

  // Prefetch for write (PSTL1KEEP) of the resolved memory operand,
  // handling the three memory-operand shapes: base+disp,
  // base+index<<scale, and base+disp+index<<scale.
  enc_class aarch64_enc_prefetchw(memory mem) %{
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      // No index register: simple base + displacement.
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        // Both displacement and scaled index: fold base+disp into a
        // scratch register first since prfm cannot encode all three.
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4196 
  // mov encodings
4198 
4199   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4200     MacroAssembler _masm(&cbuf);
4201     u_int32_t con = (u_int32_t)$src$$constant;
4202     Register dst_reg = as_Register($dst$$reg);
4203     if (con == 0) {
4204       __ movw(dst_reg, zr);
4205     } else {
4206       __ movw(dst_reg, con);
4207     }
4208   %}
4209 
4210   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4211     MacroAssembler _masm(&cbuf);
4212     Register dst_reg = as_Register($dst$$reg);
4213     u_int64_t con = (u_int64_t)$src$$constant;
4214     if (con == 0) {
4215       __ mov(dst_reg, zr);
4216     } else {
4217       __ mov(dst_reg, con);
4218     }
4219   %}
4220 
  // Load a pointer constant, dispatching on its relocation type.
  // NULL and the special value 1 have dedicated encodings
  // (aarch64_enc_mov_p0/p1) and must not reach here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // Small constants within the first page are materialized
          // directly rather than via adrp.
          __ mov(dst_reg, con);
        } else {
          // pc-relative adrp/add pair for everything else.
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4245 
4246   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4247     MacroAssembler _masm(&cbuf);
4248     Register dst_reg = as_Register($dst$$reg);
4249     __ mov(dst_reg, zr);
4250   %}
4251 
4252   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4253     MacroAssembler _masm(&cbuf);
4254     Register dst_reg = as_Register($dst$$reg);
4255     __ mov(dst_reg, (u_int64_t)1);
4256   %}
4257 
  // Load the address of the safepoint polling page via a pc-relative
  // adrp carrying a poll_type relocation.
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    // The polling page is page aligned, so adrp alone must reach it.
    assert(off == 0, "assumed offset == 0");
  %}
4266 
  // Load the byte map base (card table base address) into dst.
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4271 
  // Load a narrow (compressed) oop constant with an oop relocation.
  // The zero narrow oop is handled by aarch64_enc_mov_n0 and must not
  // reach here.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4284 
4285   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4286     MacroAssembler _masm(&cbuf);
4287     Register dst_reg = as_Register($dst$$reg);
4288     __ mov(dst_reg, zr);
4289   %}
4290 
  // Load a narrow (compressed) klass constant with a metadata
  // relocation.  A null constant must not reach here.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4303 
  // arithmetic encodings

  // 32-bit add/subtract of an immediate.  The instruct's primary opcode
  // selects the operation (see below); a subtract is implemented by
  // negating the constant, and a negative constant is flipped back so
  // the encoded immediate is always non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
4319 
  // 64-bit add/subtract of an immediate; same primary-opcode scheme as
  // aarch64_enc_addsubw_imm above.
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4333 
4334   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4335     MacroAssembler _masm(&cbuf);
4336    Register dst_reg = as_Register($dst$$reg);
4337    Register src1_reg = as_Register($src1$$reg);
4338    Register src2_reg = as_Register($src2$$reg);
4339     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4340   %}
4341 
4342   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4343     MacroAssembler _masm(&cbuf);
4344    Register dst_reg = as_Register($dst$$reg);
4345    Register src1_reg = as_Register($src1$$reg);
4346    Register src2_reg = as_Register($src2$$reg);
4347     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4348   %}
4349 
4350   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4351     MacroAssembler _masm(&cbuf);
4352    Register dst_reg = as_Register($dst$$reg);
4353    Register src1_reg = as_Register($src1$$reg);
4354    Register src2_reg = as_Register($src2$$reg);
4355     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4356   %}
4357 
4358   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4359     MacroAssembler _masm(&cbuf);
4360    Register dst_reg = as_Register($dst$$reg);
4361    Register src1_reg = as_Register($src1$$reg);
4362    Register src2_reg = as_Register($src2$$reg);
4363     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4364   %}
4365 
4366   // compare instruction encodings
4367 
4368   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4369     MacroAssembler _masm(&cbuf);
4370     Register reg1 = as_Register($src1$$reg);
4371     Register reg2 = as_Register($src2$$reg);
4372     __ cmpw(reg1, reg2);
4373   %}
4374 
  // Compare a 32-bit register against an add/sub-encodable immediate.
  // Implemented as subs/adds into the zero register so only the flags
  // change; a negative immediate is compared by adding its magnitude.
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}
4385 
4386   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4387     MacroAssembler _masm(&cbuf);
4388     Register reg1 = as_Register($src1$$reg);
4389     u_int32_t val = (u_int32_t)$src2$$constant;
4390     __ movw(rscratch1, val);
4391     __ cmpw(reg1, rscratch1);
4392   %}
4393 
4394   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4395     MacroAssembler _masm(&cbuf);
4396     Register reg1 = as_Register($src1$$reg);
4397     Register reg2 = as_Register($src2$$reg);
4398     __ cmp(reg1, reg2);
4399   %}
4400 
  // Compare a 64-bit register against an add/sub-encodable immediate
  // via subs/adds into the zero register (flags only).
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      // Negative and safely negatable: add the magnitude instead.
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case
      // (-val overflows back to val, so materialize the constant in a
      // scratch register and compare against that).
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4415 
4416   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4417     MacroAssembler _masm(&cbuf);
4418     Register reg1 = as_Register($src1$$reg);
4419     u_int64_t val = (u_int64_t)$src2$$constant;
4420     __ mov(rscratch1, val);
4421     __ cmp(reg1, rscratch1);
4422   %}
4423 
4424   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4425     MacroAssembler _masm(&cbuf);
4426     Register reg1 = as_Register($src1$$reg);
4427     Register reg2 = as_Register($src2$$reg);
4428     __ cmp(reg1, reg2);
4429   %}
4430 
4431   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4432     MacroAssembler _masm(&cbuf);
4433     Register reg1 = as_Register($src1$$reg);
4434     Register reg2 = as_Register($src2$$reg);
4435     __ cmpw(reg1, reg2);
4436   %}
4437 
4438   enc_class aarch64_enc_testp(iRegP src) %{
4439     MacroAssembler _masm(&cbuf);
4440     Register reg = as_Register($src$$reg);
4441     __ cmp(reg, zr);
4442   %}
4443 
4444   enc_class aarch64_enc_testn(iRegN src) %{
4445     MacroAssembler _masm(&cbuf);
4446     Register reg = as_Register($src$$reg);
4447     __ cmpw(reg, zr);
4448   %}
4449 
4450   enc_class aarch64_enc_b(label lbl) %{
4451     MacroAssembler _masm(&cbuf);
4452     Label *L = $lbl$$label;
4453     __ b(*L);
4454   %}
4455 
4456   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4457     MacroAssembler _masm(&cbuf);
4458     Label *L = $lbl$$label;
4459     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4460   %}
4461 
4462   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4463     MacroAssembler _masm(&cbuf);
4464     Label *L = $lbl$$label;
4465     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4466   %}
4467 
  // Slow-path partial subtype check via the shared
  // check_klass_subtype_slow_path logic.  Control falls through on a
  // hit and reaches the miss label otherwise; the flags are set for
  // the consuming instruct.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       // primary variant: zero the result register on the hit path.
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4485 
  // Emit a static (or optimized-virtual) Java call.  A null _method
  // means this is really a call to a runtime wrapper; a real Java call
  // additionally gets a to-interpreter stub.  Either path bails out the
  // compilation when the code cache is exhausted.
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      // Pick the relocation that matches the call's resolution kind.
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4512 
  // Emit a virtual (inline-cache) Java call.  Bails out the compilation
  // when the code cache is full.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4522 
  // Call epilogue.  The VerifyStackAtCalls check is not implemented on
  // AArch64 yet, hence the call_Unimplemented trap.
  enc_class aarch64_enc_call_epilog() %{
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4530 
  // Call from compiled Java code to a runtime entry point (e.g. an
  // arraycopy stub scheduled by C2 as a runtime call).
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // Target lives outside the code cache: full native call via blrt.
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // Pop the breadcrumb pair pushed above.
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4561 
  // Rethrow an exception: jump (not call) to the rethrow stub.
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4566 
  // Method return: branch to the address in the link register.
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4571 
4572   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4573     MacroAssembler _masm(&cbuf);
4574     Register target_reg = as_Register($jump_target$$reg);
4575     __ br(target_reg);
4576   %}
4577 
  // Forward an in-flight exception: jump to the handler, passing the
  // popped return address in r3 as the handler expects.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4587 
  // Fast-path monitor enter used by C2's FastLock node.  On exit the
  // condition flags encode the outcome: EQ => lock acquired,
  // NE => must fall back to the runtime.
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      // oop can't be zero here, so this sets NE and forces the slow path.
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    if (UseLSE) {
      // LSE: single casal does the compare-and-exchange atomically.
      __ mov(tmp, disp_hdr);
      __ casal(Assembler::xword, tmp, box, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::EQ, cont);
    } else {
      // Load/store-exclusive loop.
      Label retry_load;
      __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object; having now locked it, we continue at label cont.

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      if (UseLSE) {
        __ mov(rscratch1, disp_hdr);
        __ casal(Assembler::xword, rscratch1, rthread, tmp);
        __ cmp(rscratch1, disp_hdr);
      } else {
        Label retry_load, fail;
        __ prfm(Address(tmp), PSTL1STRM);
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4740 
  // TODO
  // reimplement this with custom cmpxchgptr code
  // which avoids some of the unnecessary branching
  //
  // Fast-path monitor exit used by C2's FastUnlock node.  On exit the
  // condition flags encode the outcome: EQ => unlocked, NE => must
  // fall back to the runtime.
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      if (UseLSE) {
        // LSE: single casl does the compare-and-exchange atomically.
        __ mov(tmp, box);
        __ casl(Assembler::xword, tmp, disp_hdr, oop);
        __ cmp(tmp, box);
      } else {
        // Load/store-exclusive loop.
        Label retry_load;
        __ prfm(Address(oop), PSTL1STRM);
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4838 
4839 %}
4840 
4841 //----------FRAME--------------------------------------------------------------
4842 // Definition of frame structure and management information.
4843 //
4844 //  S T A C K   L A Y O U T    Allocators stack-slot number
4845 //                             |   (to get allocators register number
4846 //  G  Owned by    |        |  v    add OptoReg::stack0())
4847 //  r   CALLER     |        |
4848 //  o     |        +--------+      pad to even-align allocators stack-slot
4849 //  w     V        |  pad0  |        numbers; owned by CALLER
4850 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
4851 //  h     ^        |   in   |  5
4852 //        |        |  args  |  4   Holes in incoming args owned by SELF
4853 //  |     |        |        |  3
4854 //  |     |        +--------+
4855 //  V     |        | old out|      Empty on Intel, window on Sparc
4856 //        |    old |preserve|      Must be even aligned.
4857 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
4858 //        |        |   in   |  3   area for Intel ret address
4859 //     Owned by    |preserve|      Empty on Sparc.
4860 //       SELF      +--------+
4861 //        |        |  pad2  |  2   pad to align old SP
4862 //        |        +--------+  1
4863 //        |        | locks  |  0
4864 //        |        +--------+----> OptoReg::stack0(), even aligned
4865 //        |        |  pad1  | 11   pad to align new SP
4866 //        |        +--------+
4867 //        |        |        | 10
4868 //        |        | spills |  9   spills
4869 //        V        |        |  8   (pad0 slot for callee)
4870 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
4871 //        ^        |  out   |  7
4872 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
4873 //     Owned by    +--------+
4874 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
4875 //        |    new |preserve|      Must be even-aligned.
4876 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
4877 //        |        |        |
4878 //
4879 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
4880 //         known from SELF's arguments and the Java calling convention.
4881 //         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
4889 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
4890 //         even aligned with pad0 as needed.
4891 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
4892 //           (the latter is true on Intel but is it false on AArch64?)
4893 //         region 6-11 is even aligned; it may be padded out more so that
4894 //         the region from SP to FP meets the minimum stack alignment.
4895 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4896 //         alignment.  Region 11, pad1, may be dynamically extended so that
4897 //         SP meets the minimum alignment.
4898 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo/hi give the OptoReg numbers for the low and high halves of
    // the return value, indexed by ideal register type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5002 
5003 //----------ATTRIBUTES---------------------------------------------------------
5004 //----------Operand Attributes-------------------------------------------------
// Default operand cost applied when an operand definition does not set
// op_cost explicitly (most operands below override this to 0).
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5020 
5021 //----------OPERANDS-----------------------------------------------------------
5022 // Operand definitions must precede instruction definitions for correct parsing
5023 // in the ADLC because operands constitute user defined types which are used in
5024 // instruction definitions.
5025 
5026 //----------Simple Operands----------------------------------------------------
5027 
// Integer operands 32 bit
// 32 bit immediate
// Any 32 bit integer constant (no predicate restriction).
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no greater than 4 (note: unbounded below)
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (low byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (low halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5181 
// Constant 63.
// NOTE(review): despite the immL_ name this operand matches ConI and
// reads get_int() -- it appears intended for 64-bit shift counts, which
// are int-valued nodes in the ideal graph. Confirm the ConI match is
// intentional before "fixing" it to ConL.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant 255.
// NOTE(review): same caveat as immL_63 -- matches ConI/get_int despite
// the immL_ name; verify against the rules that use it.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 65535 (low halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (low word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit contiguous low-order bit mask: a value of the form 2^k - 1
// with the top two bits clear (so k <= 62).
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit contiguous low-order bit mask: a value of the form 2^k - 1
// with the top two bits clear (so k <= 30).
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset, long-valued variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5297 
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte-element access (shift of 2 in offset_ok_for_immed)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte-element access (shift of 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte-element access (shift of 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-valued load/store offset, unscaled
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-valued offset for a 4-byte-element access
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-valued offset for an 8-byte-element access
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long-valued offset for a 16-byte-element access
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5400 
// Integer operands 64 bit
// 64 bit immediate
// Any 64 bit integer constant (no predicate restriction).
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

// Matches exactly the byte offset of last_Java_pc within JavaThread's
// frame anchor, so stores of the current pc to the anchor can be matched.
operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5487 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
// Matches only the address of the VM's safepoint polling page.
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// Matches only the card table base address used by the GC write barrier.
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5569 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// Compares the raw bit pattern, so -0.0d (sign bit set) does NOT match.
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: value representable as a packed 8-bit FP immediate
// (the form accepted by an FMOV with immediate operand) -- see
// Assembler::operand_valid_for_float_immediate.
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// Compares the raw bit pattern, so -0.0f does NOT match.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: value representable as a packed 8-bit FP immediate
// (checked after widening to double).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5661 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5695 
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  // Explicit zero cost for consistency with every sibling register
  // operand (the op_attrib default would otherwise charge cost 1).
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5704 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5876 
5877 
// Register R4 only (comment previously said R2 -- copy/paste error)
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5888 
5889 
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// (comment previously said "Integer 64 bit Register" -- copy/paste error)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (D-sized) vector register operand
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (Q/X-sized) vector register operand
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5966 
// Double register V0 only (FP/SIMD argument/result register)
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6002 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// for floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
// Same physical register as rFlagsReg; the distinct operand lets rules
// select unsigned condition codes.
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6042 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg (was mislabelled "link_reg")
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6084 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER blocks below, index(0xffffffff) means "no index
// register" and disp(0x0)/scale(0x0) mean no displacement/shift.

// [reg] -- plain register-indirect addressing
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// base + (index << scale) + unsigned 12-bit int offset
operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + (index << scale) + unsigned 12-bit long offset
operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// base + sign-extended int index + offset
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// base + (sign-extended int index << scale) + offset
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// base + (sign-extended int index << scale), no offset
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + (long index << scale), no offset
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// base + long index, no scale, no offset
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
6198 
// base + int offset (any offset valid for scaled or unscaled access)
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + int offset valid for 4-byte accesses
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + int offset valid for 8-byte accesses
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + int offset valid for 16-byte accesses
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + long offset (any offset valid for scaled or unscaled access)
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + long offset valid for 4-byte accesses
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + long offset valid for 8-byte accesses
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// base + long offset valid for 16-byte accesses
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6310 
// Narrow-oop (compressed pointer) addressing modes.  Each operand folds
// a DecodeN of the narrow base register into the addressing mode; the
// predicate restricts them to configurations where narrow oop decoding
// involves no shift (Universe::narrow_oop_shift() == 0), so the raw
// narrow register can serve directly as the base.

// [reg] with narrow base
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + scaled long index + unsigned 12-bit int offset
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// narrow base + scaled long index + unsigned 12-bit long offset
// NOTE(review): op_cost is INSN_COST here but 0 on the IN variant
// above — confirm this asymmetry is intentional.
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// narrow base + sign-extended int index + long offset
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + scaled, sign-extended int index + long offset
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// narrow base + scaled, sign-extended int index (no offset)
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + scaled long index (no offset)
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// narrow base + long index
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// narrow base + int offset
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// narrow base + long offset
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6460 
6461 
6462 
6463 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// Address of the last_Java_pc slot in the current thread's frame
// anchor: thread register base plus the fixed pc-slot offset
// (immL_pc_off).  Used by opto stubs when recording the Java pc.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
6477 
6478 //----------Special Memory Operands--------------------------------------------
6479 // Stack Slot Operand - This operand is used for loading and storing temporary
6480 //                      values on the stack where a match requires a value to
6481 //                      flow through memory.
// Stack-slot operands: SP-relative memory locations used when a value
// must flow through memory during matching.  base(0x1e) is the encoding
// of SP; the register-allocated stack slot supplies the displacement.
// These have no match rule — they are only generated internally by the
// matcher.
// NOTE(review): only stackSlotP carries op_cost(100); the I/F/D/L
// variants have no explicit cost — confirm that is intentional.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// int stack slot
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// float stack slot
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// double stack slot
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// long stack slot
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6552 
6553 // Operands for expressing Control Flow
6554 // NOTE: Label is a predefined operand which should not be redefined in
6555 //       the AD file. It is generically handled within the ADLC.
6556 
6557 //----------Conditional Branch Operands----------------------------------------
6558 // Comparison Op  - This is the operation of the comparison, and is limited to
6559 //                  the following set of codes:
6560 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6561 //
6562 // Other attributes of the comparison, such as unsignedness, are specified
6563 // by the comparison instruction that sets a condition code flags register.
6564 // That result is represented by a flags operand whose subtype is appropriate
6565 // to the unsignedness (etc.) of the comparison.
6566 //
6567 // Later, the instruction which matches both the Comparison Op (a Bool) and
6568 // the flags (produced by the Cmp) specifies the coding of the comparison op
6569 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6570 
6571 // used for signed integral comparisons and fp comparisons
6572 
// Comparison-op operand for signed integral and fp comparisons.  The
// numeric values are the AArch64 condition-code encodings used in the
// cond field of conditional instructions (eq=0x0, ne=0x1, lt=0xb,
// ge=0xa, le=0xd, gt=0xc, vs=0x6, vc=0x7).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

// Same as cmpOp but maps less/greater to the unsigned condition codes
// (lo=0x3, hs=0x2, ls=0x9, hi=0x8).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6608 
6609 // Special operand allowing long args to int ops to be truncated for free
6610 
// Register operand that matches a ConvL2I of a long register at zero
// cost: 32-bit instructions simply read the low half of the long
// register, so no explicit truncation instruction is needed.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Addressing modes legal for 4-, 8- and 16-byte vector loads/stores:
// plain indirect, base+index, or base plus a suitably aligned offset.
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
6625 
6626 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
6628 // instruction definitions by not requiring the AD writer to specify
6629 // separate instructions for every form of operand when the
6630 // instruction accepts multiple operand types with the same basic
6631 // encoding and format. The classic case of this is memory operands.
6632 
6633 // memory is used to define read/write location for load/store
6634 // instruction defs. we can turn a memory op into an Address
6635 
// memory is used to define the read/write location for load/store
// instruction defs: any of the plain-pointer addressing operands (first
// line) or their narrow-oop counterparts (second line).
opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);


// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
6654 
6655 //----------PIPELINE-----------------------------------------------------------
6656 // Rules which define the behavior of the target architectures pipeline.
6657 
6658 // For specific pipelines, eg A53, define the stages of that pipeline
6659 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style named pipeline stages onto the generic stages
// (S0..S5) declared by pipe_desc below: issue, two execute stages,
// and writeback.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6664 
6665 // Integer ALU reg operation
6666 pipeline %{
6667 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // All AArch64 instructions are 4 bytes
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6680 
6681 // We don't use an actual pipeline model so don't care about resources
6682 // or description. we do use pipeline classes to introduce fixed
6683 // latencies
6684 
6685 //----------RESOURCES----------------------------------------------------------
6686 // Resources are the functional units available to the machine
6687 
// Issue slots (INS0/INS1, with INS01 meaning "either"), two ALUs (ALU
// meaning "either"), multiply-accumulate, divide, branch, load/store
// and NEON/FP units.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
6701 
6702 //----------PIPELINE CLASSES---------------------------------------------------
6703 // Pipeline Classes describe the stages in which input and output are
6704 // referenced by the hardware pipeline.
6705 
// FP dyadic op, single precision: operands read early, result written
// in S5 on the NEON/FP unit; dual-issuable in either slot.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> float conversion
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double conversion
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6761 
// FP <-> integer conversions.  All share the same timing shape: source
// read in S1, result written in S5 on the NEON/FP unit, dual-issuable.

// float -> int
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double
// NOTE(review): src is declared iRegIorL2I, unlike fp_l2f which uses
// iRegL — confirm the int-or-truncated-long class is intended here.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6833 
// FP divide, single precision: restricted to issue slot 0 (INS0),
// unlike the dyadic ops above which can use either slot.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: also reads the flags
// register; shorter latency (result in S3) than the arithmetic ops.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, single precision (no source operands)
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load, single precision (result in S4)
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6907 
// Vector pipeline classes.  Convention throughout: the 64-bit (vecD)
// form can dual-issue in either slot (INS01) while the 128-bit (vecX)
// form is restricted to slot 0 (INS0).

// 64-bit vector multiply
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector multiply-accumulate: dst is also read (accumulator)
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector multiply-accumulate
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector integer dyadic op (result in S4)
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// 128-bit vector integer dyadic op
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// 64-bit vector logical op (result in S3)
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector logical op
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
6989 
// 64-bit vector shift by register shift amount
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by register shift amount
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector shift by immediate (shift amount not a pipeline input)
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector shift by immediate
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// 64-bit vector FP dyadic op
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP dyadic op
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP multiply/divide (slot 0 only, even for vecD)
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP multiply/divide
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP square root
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// 64-bit vector FP unary op
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// 128-bit vector FP unary op
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7094 
// Duplicate a general register into all lanes of a 64-bit vector
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a general register into all lanes of a 128-bit vector
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into a 64-bit vector
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register into a 128-bit vector
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register into a 128-bit vector
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit (slot 0 only)
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7155 
// 64-bit vector load: address consumed at issue, result in S5
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 128-bit vector load
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// 64-bit vector store: address at issue, data read in S2
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7182 
7183 pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
7184 %{
7185   single_instruction;
7186   mem    : ISS(read);
7187   src    : S2(read);
7188   INS01  : ISS;
7189   NEON_FP : S3;
7190 %}
7191 
7192 //------- Integer ALU operations --------------------------
7193 
7194 // Integer ALU reg-reg operation
7195 // Operands needed in EX1, result generated in EX2
7196 // Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);  // shifted operand needed early, at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// NOTE(review): the comment says EX2 and dst is written in EX2, but
// the ALU resource is held in EX1 — confirm which stage is intended.
// Eg.  EXTR    x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7289 
7290 //------- Compare operation -------------------------------
7291 
7292 // Compare reg-reg
7293 // Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);   // flags result
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7316 
7317 //------- Conditional instructions ------------------------
7318 
7319 // Conditional no operands
7320 // Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// EG.  CSINC   X0, X1, X1, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7354 
7355 //------- Multiply pipeline operations --------------------
7356 
7357 // Multiply reg-reg
7358 // Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);  // operands needed at issue for the MAC unit
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Long (64-bit) multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

//------- Divide pipeline operations --------------------

// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// Eg.  SDIV    x0, x1, x2
// NOTE(review): parameters are declared iRegI even though this models
// the 64-bit divide — presumably the operand types are not significant
// to the scheduler; confirm.
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7433 
//------- Load pipeline operations ------------------------
// All loads use the LDST unit and may dual issue via INS01.

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7467 
//------- Store pipeline operations -----------------------
// Stores read the address operand early (ISS) and the data
// operand late (EX2).

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read); // dst is the address register here, read at issue
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7501 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7530 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
// Used for match rules that expand into several emitted instructions.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7554 
// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
// Used when an instruct gives no more specific ins_pipe.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100); // deliberately large: calls dominate any schedule
%}

// Define the class for the Nop node.
define %{
   MachNop = pipe_class_empty;
%}
7594 
7595 %}
7596 //----------INSTRUCTIONS-------------------------------------------------------
7597 //
7598 // match      -- States which machine-independent subtree may be replaced
7599 //               by this instruction.
7600 // ins_cost   -- The estimated cost of this instruction is used by instruction
7601 //               selection to identify a minimum cost tree of machine
7602 //               instructions that matches a tree of machine-independent
7603 //               instructions.
7604 // format     -- A string providing the disassembly for this instruction.
7605 //               The value of an instruction's operand may be inserted
7606 //               by referring to it with a '$' prefix.
7607 // opcode     -- Three instruction opcodes may be provided.  These are referred
7608 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7610 //               indicate the type of machine instruction, while secondary
7611 //               and tertiary are often used for prefix options or addressing
7612 //               modes.
7613 // ins_encode -- A list of encode classes with parameters. The encode class
7614 //               name must have been defined in an 'enc_class' specification
7615 //               in the encode section of the architecture description.
7616 
7617 // ============================================================================
7618 // Memory (Load/Store) Instructions
7619 
// Load Instructions
// These rules match only non-acquiring loads (see the
// !needs_acquiring_load predicates); the acquiring/volatile forms
// appear in the "volatile loads and stores" section below.

// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // predicate applies to the LoadB underneath the ConvI2L
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // predicate walks AndL -> ConvI2L -> LoadI to reach the load node
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // NOTE(review): the "# int" annotation below looks like a copy-paste
  // from the 32-bit rule — this is a 64-bit load.
  format %{ "ldr  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7789 
// Load Range
// No acquiring-load predicate here — presumably array-length loads are
// never matched as volatile accesses; TODO confirm against matcher.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Float
// FP loads use the generic memory pipe class rather than iload_reg_mem.
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7886 
7887 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
// Costed at 4 insns: a general pointer mov may need a multi-instruction
// materialization sequence.
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7943 
// Load Pointer Constant One
// Materializes the pointer value 1 (immP_1).

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Fixed disassembly text: this was "# NULL ptr", copy-pasted from
  // loadConP0, but the operand here is the constant one, not null.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7957 
// Load Poll Page Constant
// Materialized with adr (PC-relative) rather than a mov sequence.

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Packed Float Constant
// Matches only floats encodable as an FMOV immediate (immFPacked).

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // fmov takes the immediate in double form
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant
// General case: fetched from the constant table.

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant
// Matches only doubles encodable as an FMOV immediate (immDPacked).

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
8071 
// Load Double Constant
// General case: fetched from the constant table.

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  // Fixed disassembly text: said "float=$con" (copy-paste from loadConF);
  // this rule loads a double.
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8088 
// Store Instructions
// As with loads, these rules match only non-releasing stores (see the
// !needs_releasing_store predicates); releasing forms are below.

// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  // matched only when the preceding StoreStore barrier can be elided
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8133 
8134 
// Store Byte Immediate Zero
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Fixed disassembly text: said "strb rscractch2" (typo, and inconsistent
  // with aarch64_enc_strb0, which storeimmCM0 documents as storing zr).
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8147 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}

// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}

// Store Long (64 bit signed)
// NOTE(review): the "# int" annotations in the two long-store formats
// below look like copy-paste from the 32-bit rules.
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store a compressed null by storing rheapbase, which holds zero when
// there is no narrow oop/klass base (see the predicate).
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// TODO
// implement storeImmD0 and storeDImmPacked

// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8349 
//  ---------------- volatile loads and stores ----------------
// Acquiring loads use load-acquire (ldar*) encodings and an indirect
// (base-register-only) address; they are scheduled via pipe_serial.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8441 
// Load Short/Char (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Fixed disassembly text: said "ldarh" (unsigned) but the encoding is
  // aarch64_enc_ldarsh — a signed halfword load-acquire.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8454 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// FP acquiring loads go through an integer ldar plus fmov in the
// encoding (no FP ldar exists) — see aarch64_enc_fldars.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8545 
// Releasing stores use store-release (stlr*) encodings. Note they are
// scheduled via pipe_class_memory, unlike the acquiring loads above
// which use pipe_serial.

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
// FP releasing stores go via fmov to an integer register plus stlr in
// the encoding — see aarch64_enc_fstlrs.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8653 
8654 //  ---------------- end of volatile loads and stores ----------------
8655 
8656 // ============================================================================
8657 // BSWAP Instructions
8658 
8659 instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
8660   match(Set dst (ReverseBytesI src));
8661 
8662   ins_cost(INSN_COST);
8663   format %{ "revw  $dst, $src" %}
8664 
8665   ins_encode %{
8666     __ revw(as_Register($dst$$reg), as_Register($src$$reg));
8667   %}
8668 
8669   ins_pipe(ialu_reg);
8670 %}
8671 
8672 instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
8673   match(Set dst (ReverseBytesL src));
8674 
8675   ins_cost(INSN_COST);
8676   format %{ "rev  $dst, $src" %}
8677 
8678   ins_encode %{
8679     __ rev(as_Register($dst$$reg), as_Register($src$$reg));
8680   %}
8681 
8682   ins_pipe(ialu_reg);
8683 %}
8684 
8685 instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
8686   match(Set dst (ReverseBytesUS src));
8687 
8688   ins_cost(INSN_COST);
8689   format %{ "rev16w  $dst, $src" %}
8690 
8691   ins_encode %{
8692     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8693   %}
8694 
8695   ins_pipe(ialu_reg);
8696 %}
8697 
8698 instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
8699   match(Set dst (ReverseBytesS src));
8700 
8701   ins_cost(INSN_COST);
8702   format %{ "rev16w  $dst, $src\n\t"
8703             "sbfmw $dst, $dst, #0, #15" %}
8704 
8705   ins_encode %{
8706     __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
8707     __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
8708   %}
8709 
8710   ins_pipe(ialu_reg);
8711 %}
8712 
8713 // ============================================================================
8714 // Zero Count Instructions
8715 
// Integer.numberOfLeadingZeros: direct clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long.numberOfLeadingZeros: direct clz instruction (result is an int).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Integer.numberOfTrailingZeros: AArch64 has no trailing-zero count, so
// bit-reverse (rbitw) and count leading zeros instead.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long.numberOfTrailingZeros: same rbit + clz trick on 64 bits.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8767 
8768 //---------- Population Count Instructions -------------------------------------
8769 //
8770 
// Integer.bitCount: move to a SIMD register, cnt counts bits per byte,
// addv sums the 8 byte-counts, result moved back to a GP register.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // NOTE(review): this writes an input register ($src) without a
    // declared effect; the 32-bit value itself is unchanged (movw only
    // zero-extends), but confirm this is intentional.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountI fused with its memory load: ldrs loads the 32-bit value
// straight into the SIMD temp, skipping the GP->FP move.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // loadStore picks the addressing mode from the matched memory operand.
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// PopCountL fused with its memory load (ldrd into the SIMD temp).
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8857 
8858 // ============================================================================
8859 // MemBar Instruction
8860 
// LoadFence: orders earlier loads before later loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided acquire barrier: the predicate proves the preceding access
// already provides acquire semantics (e.g. ldar), so emit nothing but a
// comment in the disassembly.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// Full acquire barrier when it cannot be elided.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// Lock-entry acquire barrier: always elided here — the lock acquisition
// code itself supplies the ordering.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: orders earlier loads and stores before later stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Elided release barrier (the following access is already a release op).
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// Full release barrier when it cannot be elided.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Store-store barrier only.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Lock-exit release barrier: always elided — the unlock code provides it.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Elided volatile (StoreLoad) barrier per the unnecessary_volatile check.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Full StoreLoad barrier — the expensive one; cost is inflated so the
// matcher strongly prefers any elidable alternative.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
9005 
9006 // ============================================================================
9007 // Cast/Convert Instructions
9008 
// Reinterpret a long as a pointer; the mov is elided when the allocator
// already placed source and destination in the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Reinterpret a pointer as a long (inverse of castX2P, same elision).
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9051 
9052 // Convert compressed oop into int for vectors alignment masking
9053 // in case of 32bit oops (heap < 4Gb).
// Low 32 bits of an uncompressed oop == the narrow oop when the encode
// shift is zero, so a plain 32-bit register move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed: format previously read "mov dst, $src" — the destination was
  // missing its '$' substitution and the mnemonic did not match the
  // emitted movw.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9067 
9068 
// Convert oop pointer into compressed form
// Maybe-null path: encode_heap_oop must test for null, which clobbers
// the flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Known-non-null path: no null check needed.
// NOTE(review): cr is declared but carries no effect clause here —
// confirm encode_heap_oop_not_null leaves the flags untouched.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (and is not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9122 
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here

// Compress a klass pointer (never null, so no null check required).
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer; a distinct in-place variant of the
// macro is used when src and dst were allocated to the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
9160 
// Type-system-only cast: emits no code (size 0), it merely retypes the
// value for the optimizer.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Pointer cast no-op, same pattern as checkCastPP.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// Integer cast no-op (used to pin value ranges); zero size and cost.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9191 
9192 // ============================================================================
9193 // Atomic operation instructions
9194 //
9195 // Intel and SPARC both implement Ideal Node LoadPLocked and
9196 // Store{PIL}Conditional instructions using a normal load for the
9197 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9198 //
9199 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9200 // pair to lock object allocations from Eden space when not using
9201 // TLABs.
9202 //
9203 // There does not appear to be a Load{IL}Locked Ideal Node and the
9204 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9205 // and to use StoreIConditional only for 32-bit and StoreLConditional
9206 // only for 64-bit.
9207 //
9208 // We implement LoadPLocked and StorePLocked instructions using,
9209 // respectively the AArch64 hw load-exclusive and store-conditional
9210 // instructions. Whereas we must implement each of
9211 // Store{IL}Conditional using a CAS which employs a pair of
9212 // instructions comprising a load-exclusive followed by a
9213 // store-conditional.
9214 
9215 
// Locked-load (linked load) of the current heap-top
// used when updating the eden heap top
// implemented using ldaxr on AArch64

instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}

// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  // n.b. oldval is not passed to the encoder: the store-exclusive pairs
  // with the preceding loadPLocked, so only newval and the address matter.
  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}


// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9297 
9298 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9299 // can't match them
9300 
9301 // standard CompareAndSwapX when we are using barriers
9302 // these have higher priority than the rules selected by a predicate
9303 
// CAS on a 32-bit int; res is set to 1 on success, 0 on failure (cset EQ).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a 64-bit long.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a full-width pointer.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS on a 32-bit narrow oop.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// alternative CompareAndSwapX when we are eliding barriers
// These use the acquiring (ldaxr-based) cmpxchg forms; the predicate
// proves the surrounding barriers were elided, and the lower cost makes
// the matcher prefer these over the plain variants above.

instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9453 
9454 
// Atomic exchange, 32-bit: prev receives the old memory value.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange, 64-bit.
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a narrow oop (32-bit form).
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Atomic exchange of a full-width pointer (64-bit form).
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9490 
9491 
// Atomic fetch-and-add, long, register increment; newval gets the old value.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Result-discarding variant: slightly cheaper (cost * 9) so it is chosen
// when the fetched value is unused; noreg drops the result.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Long fetch-and-add with an immediate increment (add/sub-range constant).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment, result-discarding variant.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit, result-discarding variant.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit fetch-and-add with an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit immediate-increment, result-discarding variant.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9575 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// cset gives 0/1 for !=, then cneg conditionally negates on <, yielding
// the three-way -1/0/1 result.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9598 
// CmpL3 against an add/sub-range immediate: compare via subs (or adds of
// the negated constant when it is negative), then cset/cneg produce the
// three-way -1/0/1 result as in cmpL3_reg_reg above.
// Fixed: the `if` inside ins_encode was mis-indented relative to the
// surrounding C++ snippet.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    // immLAddSub constrains the constant to the add/sub immediate range,
    // so negating it below cannot overflow.
    int32_t con = (int32_t)$src2$$constant;
    if (con < 0) {
      // A negative comparison immediate is handled as cmn (adds of -con).
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9623 
9624 // ============================================================================
9625 // Conditional Move Instructions
9626 
9627 // n.b. we have identical rules for both a signed compare op (cmpOp)
9628 // and an unsigned compare op (cmpOpU). it would be nice if we could
9629 // define an op class which merged both inputs and use it to type the
9630 // argument to a single rule. unfortunatelyt his fails because the
9631 // opclass does not live up to the COND_INTER interface of its
9632 // component operands. When the generic code tries to negate the
9633 // operand it ends up running the generci Machoper::negate method
9634 // which throws a ShouldNotHappen. So, we have to provide two flavours
9635 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9636 
// Conditional move between two int registers. n.b. the encoding passes
// $src2 as the first source, i.e. dst = $cmp ? src2 : src1 (CSEL selects
// its first source register when the condition holds).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Identical to cmovI_reg_reg but typed for an unsigned compare
// (cmpOpU / rFlagsRegU) -- see the n.b. above on why both flavours exist.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9668 
9669 // special cases where one arg is zero
9670 
9671 // n.b. this is selected in preference to the rule above because it
9672 // avoids loading constant 0 into a source register
9673 
9674 // TODO
9675 // we ought only to be able to cull one of these variants as the ideal
9676 // transforms ought always to order the zero consistently (to left/right?)
9677 
// Int conditional move where the first value is the constant zero:
// zr is used directly so no register needs to be loaded with 0.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9709 
// Int conditional move where the second value is the constant zero;
// zr is passed as the selected-when-true operand.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9741 
9742 // special case for creating a boolean 0 or 1
9743 
9744 // n.b. this is selected in preference to the rule above because it
9745 // avoids loading constants 0 and 1 into a source register
9746 
// Materialize a boolean 0/1 from the condition with a single csinc of
// zr against zr: dst = $cmp ? 0 : (0 + 1), i.e. the inverse of cset.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9784 
// Long conditional move between two registers (64-bit csel);
// dst = $cmp ? src2 : src1, as in the int rules above.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9816 
// special cases where one arg is zero

// Long conditional move with a zero second value; zr avoids loading 0.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9850 
// Long conditional move with a zero first value.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9882 
// Pointer conditional move between two registers (64-bit csel).
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9914 
// special cases where one arg is zero

// Pointer conditional move with a null (zero) second value.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9948 
// Pointer conditional move with a null (zero) first value.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9980 
// Compressed-pointer (narrow oop) conditional move; 32-bit cselw since
// compressed pointers occupy the low word.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9996 
// Unsigned-compare flavour of cmovN_reg_reg. The format comment now
// says "unsigned" to match the cmpOpU operand (it previously printed
// "signed", inconsistent with every other U-variant in this file).
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10012 
// special cases where one arg is zero

// Compressed-pointer conditional move with a zero second value.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10046 
// Compressed-pointer conditional move with a zero first value.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10078 
// Float conditional move via fcsels; as with the integer rules,
// $src2 is the operand selected when $cmp holds.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// unsigned-compare flavour of the rule above
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10114 
// Double conditional move via fcseld. The format comment now says
// "double" (it previously said "float", copied from the cmovF rule).
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10132 
// Unsigned-compare double conditional move. The format comment now says
// "double" (it previously said "float", copied from the cmovUF rule).
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10150 
10151 // ============================================================================
10152 // Arithmetic Instructions
10153 //
10154 
10155 // Integer Addition
10156 
10157 // TODO
10158 // these currently employ operations which do not set CR and hence are
10159 // not flagged as killing CR but we would like to isolate the cases
10160 // where we want to set flags from those where we don't. need to work
10161 // out how to do that.
10162 
// 32-bit integer add of two registers.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10177 
// 32-bit integer add of a register and an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// Same as above but the register input comes from a long narrowed via
// ConvL2I; only the low word matters for a 32-bit add.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10205 
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus an int offset: the sxtw extended-register form folds the
// ConvI2L sign extension into the add.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10236 
// Pointer plus a scaled long index, folded into a single lea with a
// shifted register operand.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus a scaled int index: the sxtw addressing mode folds both
// the sign extension and the shift into the lea.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10266 
// Sign-extend an int and shift it left, as one sbfiz (signed bitfield
// insert in zero). n.b. the width argument is clamped to 32 via MIN so
// the inserted field never exceeds the source's significant bits.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10281 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10298 
// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10330 
// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10361 
// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10378 
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // format previously printed "sub$dst" with no separator between
  // mnemonic and operand; fixed to match the other sub rules.
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10393 
// Integer Negation (special case for sub)

instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

// NOTE(review): src is typed iRegIorL2I here although this is a 64-bit
// negate of a long -- verify whether iRegL was intended.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10425 
// Integer Multiply

instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64-bit signed multiply (smull) matching a long multiply of
// two sign-extended ints.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10457 
// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of the 128-bit signed product (smulh), matching MulHiL.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10490 
// Combined Integer Multiply & Add/Sub

// dst = src3 + src1 * src2 as a single 32-bit multiply-add. The format
// mnemonics now read maddw/msubw to match the w-form instructions the
// encoding actually emits (they previously printed madd/msub).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}

// dst = src3 - src1 * src2 as a single 32-bit multiply-subtract.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10524 
// Combined Long Multiply & Add/Sub

// dst = src3 + src1 * src2 as a single 64-bit multiply-add.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}

// dst = src3 - src1 * src2 as a single 64-bit multiply-subtract.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10558 
// Integer Divide

instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}

// (src1 >> 31) >>> 31 reduces to a single unsigned shift extracting the
// sign bit (0 or 1).
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// src + sign-bit(src) in one addw with a shifted-register operand; this
// is the rounding adjustment C2 emits ahead of a divide-by-power-of-two.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10594 
// Long Divide

instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}

// 64-bit analogue of signExtract: (src1 >> 63) >>> 63 is one unsigned
// shift extracting the sign bit.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}

// 64-bit analogue of div2Round: add the sign bit in one shifted add.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "add $dst, $src, $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10630 
// Integer Remainder

// Remainder via divide then multiply-subtract through rscratch1:
// dst = src1 - (src1 / src2) * src2. The second format line previously
// read "msubw($dst, ..." with a stray "(" and no separator; fixed.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10643 
// Long Remainder

// 64-bit analogue of modI: dst = src1 - (src1 / src2) * src2.
// Format fixed as in modI (stray "(" removed) and the first line now
// ends "\n\t" for consistent two-line disassembly output.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10656 
10657 // Integer Shifts
10658 
10659 // Shift Left Register
// 32-bit shift left by a register-held amount (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10674 
10675 // Shift Left Immediate
// 32-bit shift left by a constant; the count is masked to 5 bits (& 0x1f).
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10690 
10691 // Shift Right Logical Register
// 32-bit logical (unsigned) shift right by a register-held amount (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10706 
10707 // Shift Right Logical Immediate
// 32-bit logical shift right by a constant; count masked to 5 bits (& 0x1f).
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10722 
10723 // Shift Right Arithmetic Register
// 32-bit arithmetic shift right by a register-held amount (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10738 
10739 // Shift Right Arithmetic Immediate
// 32-bit arithmetic shift right by a constant; count masked to 5 bits (& 0x1f).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10754 
10755 // Combined Int Mask and Right Shift (using UBFM)
10756 // TODO
10757 
10758 // Long Shifts
10759 
10760 // Shift Left Register
// 64-bit shift left by a register-held amount (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10775 
10776 // Shift Left Immediate
// 64-bit shift left by a constant; count masked to 6 bits (& 0x3f).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10791 
10792 // Shift Right Logical Register
// 64-bit logical (unsigned) shift right by a register-held amount (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10807 
10808 // Shift Right Logical Immediate
// 64-bit logical shift right by a constant; count masked to 6 bits (& 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10823 
10824 // A special-case pattern for card table stores.
// dst = (pointer bits of src1) >>> (src2 & 0x3f); CastP2X reinterprets
// the pointer as a long, so the same lsr encoding applies.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10839 
10840 // Shift Right Arithmetic Register
// 64-bit arithmetic shift right by a register-held amount (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10855 
10856 // Shift Right Arithmetic Immediate
// 64-bit arithmetic shift right by a constant; count masked to 6 bits (& 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10871 
10872 // BEGIN This section of the file is automatically generated. Do not edit --------------
10873 
// dst = ~src1 (64-bit NOT): XorL with -1 emitted as eon against zr.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// dst = ~src1 (32-bit NOT): XorI with -1 emitted as eonw against zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10906 
// dst = src1 & ~src2 (32-bit bicw).
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10923 
// dst = src1 & ~src2 (64-bit bic).
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10940 
// dst = src1 | ~src2 (32-bit ornw).
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10957 
// dst = src1 | ~src2 (64-bit orn).
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10974 
// dst = ~(src1 ^ src2) (32-bit eonw); -1 ^ (src2 ^ src1) == ~(src1 ^ src2).
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10991 
// dst = ~(src1 ^ src2) (64-bit eon); -1 ^ (src2 ^ src1) == ~(src1 ^ src2).
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11008 
// dst = src1 & ~(src2 >>> src3) (32-bit bicw with LSR-shifted operand).
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11026 
// dst = src1 & ~(src2 >>> src3) (64-bit bic with LSR-shifted operand).
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11044 
// dst = src1 & ~(src2 >> src3) (32-bit bicw with ASR-shifted operand).
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11062 
// dst = src1 & ~(src2 >> src3) (64-bit bic with ASR-shifted operand).
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11080 
// dst = src1 & ~(src2 << src3) (32-bit bicw with LSL-shifted operand).
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11098 
// dst = src1 & ~(src2 << src3) (64-bit bic with LSL-shifted operand).
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11116 
// dst = ~(src1 ^ (src2 >>> src3)) (32-bit eonw with LSR-shifted operand).
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11134 
// dst = ~(src1 ^ (src2 >>> src3)) (64-bit eon with LSR-shifted operand).
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11152 
// dst = ~(src1 ^ (src2 >> src3)) (32-bit eonw with ASR-shifted operand).
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11170 
// dst = ~(src1 ^ (src2 >> src3)) (64-bit eon with ASR-shifted operand).
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11188 
// dst = ~(src1 ^ (src2 << src3)) (32-bit eonw with LSL-shifted operand).
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11206 
// dst = ~(src1 ^ (src2 << src3)) (64-bit eon with LSL-shifted operand).
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11224 
// dst = src1 | ~(src2 >>> src3) (32-bit ornw with LSR-shifted operand).
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11242 
// dst = src1 | ~(src2 >>> src3) (64-bit orn with LSR-shifted operand).
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11260 
// dst = src1 | ~(src2 >> src3) (32-bit ornw with ASR-shifted operand).
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11278 
// dst = src1 | ~(src2 >> src3) (64-bit orn with ASR-shifted operand).
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11296 
// dst = src1 | ~(src2 << src3) (32-bit ornw with LSL-shifted operand).
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11314 
// dst = src1 | ~(src2 << src3) (64-bit orn with LSL-shifted operand).
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11332 
// dst = src1 & (src2 >>> src3) (32-bit andw with LSR-shifted operand).
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11351 
// dst = src1 & (src2 >>> src3) (64-bit and with LSR-shifted operand;
// MacroAssembler spells the 64-bit AND "andr").
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11370 
// dst = src1 & (src2 >> src3) (32-bit andw with ASR-shifted operand).
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11389 
// dst = src1 & (src2 >> src3) (64-bit andr with ASR-shifted operand).
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11408 
// dst = src1 & (src2 << src3) (32-bit andw with LSL-shifted operand).
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11427 
// dst = src1 & (src2 << src3) (64-bit andr with LSL-shifted operand).
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11446 
// dst = src1 ^ (src2 >>> src3) (32-bit eorw with LSR-shifted operand).
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11465 
// dst = src1 ^ (src2 >>> src3) (64-bit eor with LSR-shifted operand).
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11484 
// dst = src1 ^ (src2 >> src3) (32-bit eorw with ASR-shifted operand).
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11503 
// dst = src1 ^ (src2 >> src3) (64-bit eor with ASR-shifted operand).
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11522 
// dst = src1 ^ (src2 << src3) (32-bit eorw with LSL-shifted operand).
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11541 
// dst = src1 ^ (src2 << src3) (64-bit eor with LSL-shifted operand).
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11560 
// dst = src1 | (src2 >>> src3) (32-bit orrw with LSR-shifted operand).
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11579 
// dst = src1 | (src2 >>> src3) (64-bit orr with LSR-shifted operand).
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11598 
// dst = src1 | (src2 >> src3) (32-bit orrw with ASR-shifted operand).
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11617 
11618 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
11619                          iRegL src1, iRegL src2,
11620                          immI src3, rFlagsReg cr) %{
11621   match(Set dst (OrL src1 (RShiftL src2 src3)));
11622 
11623   ins_cost(1.9 * INSN_COST);
11624   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
11625 
11626   ins_encode %{
11627     __ orr(as_Register($dst$$reg),
11628               as_Register($src1$$reg),
11629               as_Register($src2$$reg),
11630               Assembler::ASR,
11631               $src3$$constant & 0x3f);
11632   %}
11633 
11634   ins_pipe(ialu_reg_reg_shift);
11635 %}
11636 
11637 instruct OrI_reg_LShift_reg(iRegINoSp dst,
11638                          iRegIorL2I src1, iRegIorL2I src2,
11639                          immI src3, rFlagsReg cr) %{
11640   match(Set dst (OrI src1 (LShiftI src2 src3)));
11641 
11642   ins_cost(1.9 * INSN_COST);
11643   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
11644 
11645   ins_encode %{
11646     __ orrw(as_Register($dst$$reg),
11647               as_Register($src1$$reg),
11648               as_Register($src2$$reg),
11649               Assembler::LSL,
11650               $src3$$constant & 0x1f);
11651   %}
11652 
11653   ins_pipe(ialu_reg_reg_shift);
11654 %}
11655 
11656 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
11657                          iRegL src1, iRegL src2,
11658                          immI src3, rFlagsReg cr) %{
11659   match(Set dst (OrL src1 (LShiftL src2 src3)));
11660 
11661   ins_cost(1.9 * INSN_COST);
11662   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
11663 
11664   ins_encode %{
11665     __ orr(as_Register($dst$$reg),
11666               as_Register($src1$$reg),
11667               as_Register($src2$$reg),
11668               Assembler::LSL,
11669               $src3$$constant & 0x3f);
11670   %}
11671 
11672   ins_pipe(ialu_reg_reg_shift);
11673 %}
11674 
// ---- ADD with constant-shifted register operand ----
// Each rule folds a constant shift of src2 into the shifted-register form
// of a single add/addw. Shift constants are masked to the architectural
// range: & 0x1f for 32-bit (w) forms, & 0x3f for 64-bit forms.

// int: dst = src1 + (src2 >>> src3)
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 + (src2 >>> src3)
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 + (src2 >> src3)  (arithmetic shift)
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 + (src2 >> src3)  (arithmetic shift)
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 + (src2 << src3)
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 + (src2 << src3)
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11788 
// ---- SUB with constant-shifted register operand ----
// Each rule folds a constant shift of src2 into the shifted-register form
// of a single sub/subw. Shift constants are masked to the architectural
// range: & 0x1f for 32-bit (w) forms, & 0x3f for 64-bit forms.

// int: dst = src1 - (src2 >>> src3)
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - (src2 >>> src3)
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 - (src2 >> src3)  (arithmetic shift)
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - (src2 >> src3)  (arithmetic shift)
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// int: dst = src1 - (src2 << src3)
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// long: dst = src1 - (src2 << src3)
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11902 
11903 
11904 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Long (x << lshift) >> rshift collapsed to a single signed bit-field
// move. The predicate caps both counts at 63 so the sbfm immediates
// stay in range.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // r/s are the sbfm immr/imms immediates (rotate amount and top bit
    // of the field) -- see the SBFM description in the Arm ARM.
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11927 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: (x << lshift) >> rshift as one sbfmw, with
// both counts capped at 31 by the predicate.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // r/s are the sbfm immr/imms immediates (32-bit field variant).
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11950 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (x << lshift) >>> rshift as one
// unsigned bit-field move; counts capped at 63 by the predicate.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    // r/s are the ubfm immr/imms immediates (rotate and field top bit).
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
11973 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL: (x << lshift) >>> rshift as one ubfmw, with
// both counts capped at 31 by the predicate.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    // r/s are the ubfm immr/imms immediates (32-bit field variant).
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// int: (src >>> rshift) & mask as a single ubfxw. The immI_bitmask
// operand (declared elsewhere in this file) is expected to guarantee
// mask is of the form 2^k - 1, so exact_log2(mask+1) yields the field
// width -- confirm against the operand definition.
// NOTE(review): the format string omits $rshift; debug output only.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// long: (src >>> rshift) & mask as a single ubfx; same width derivation
// as ubfxwI, using the long bitmask operand.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12028 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The ConvI2L is folded away: ubfx zeroes all bits above the extracted
// field, so the 64-bit result already equals the zero/sign-free widening
// of the masked int value.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12046 
// Rotations

// Constant rotate expressed as (x << lshift) | (x >>> rshift). The
// predicate requires lshift + rshift == 0 (mod 64), i.e. the two shifts
// cover the full word, which is exactly what extr with both sources equal
// to the same rotation computes. Here the two sources may differ, which
// extr also supports.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant: lshift + rshift must be 0 (mod 32); emits extrw.
// NOTE(review): format text says "extr" but the encoder emits extrw --
// debug output only.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same rotation idiom spelled with AddL instead of OrL: when the shifted
// fields do not overlap (guaranteed by the predicate), add and or are
// equivalent.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit AddI variant of the rotation idiom; emits extrw.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12108 
12109 
// rol expander

// Expander used by the rolL_rReg_Var_* match rules below: rotate-left by
// a variable amount is implemented as rotate-right by the negated amount
// (subw from zr, then rorv). Clobbers rscratch1.
instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12125 
// rol expander

// 32-bit variant of rolL_rReg: rotate-left via rorvw on the negated
// shift amount. Clobbers rscratch1.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12141 
// Long rotate-left by variable amount, written by the compiler as
// (x << s) | (x >>> (64 - s)); delegates to the rolL_rReg expander.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with the constant spelled 0 instead of 64 ((0 - s) and
// (64 - s) are congruent mod 64 for shift purposes).
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12159 
// Int rotate-left by variable amount, written by the compiler as
// (x << s) | (x >>> (32 - s)); delegates to the 32-bit rolI_rReg expander.
// Fixed: this rule previously declared long register classes
// (iRegLNoSp/iRegL) and expanded to the 64-bit rolL_rReg even though it
// matches the 32-bit OrI/LShiftI/URShiftI tree, so it could never match
// correctly (compare the parallel rorI_rReg_Var_C_32 rule, which uses the
// int classes and the int expander).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12168 
// Int rotate-left idiom with the constant spelled 0 instead of 32
// ((0 - s) and (32 - s) are congruent mod 32 for shift purposes).
// Fixed: this rule previously declared long register classes
// (iRegLNoSp/iRegL) and expanded to the 64-bit rolL_rReg even though it
// matches the 32-bit OrI/LShiftI/URShiftI tree; use the int register
// classes and the 32-bit rolI_rReg expander, mirroring rorI_rReg_Var_C0.
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12177 
// ror expander

// Expander used by the rorL_rReg_Var_* rules: AArch64 has a native
// variable rotate-right (rorv), so no scratch register is needed.
instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit variant of rorL_rReg, using rorvw.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// Long rotate-right by variable amount, written by the compiler as
// (x >>> s) | (x << (64 - s)); delegates to the rorL_rReg expander.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Same idiom with the constant spelled 0 instead of 64.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// Int rotate-right by variable amount: (x >>> s) | (x << (32 - s)).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

// Same int idiom with the constant spelled 0 instead of 32.
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12243 
// Add/subtract (extended)

// Long add with an int operand: the ConvI2L is folded into the add's
// sxtw extended-register form.
// NOTE(review): the trailing ';' after '%}' is inconsistent with the
// other rules in this file; presumably tolerated by adlc -- confirm.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Long subtract with an int operand: ConvI2L folded into sub's sxtw form.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12271 
12272 
// ---- Add with sign/zero-extended operand ----
// The compiler expresses a narrowing extension as (x << k) >> k (sign)
// or (x << k) >>> k (zero). The fixed-immediate operand types
// (immI_16/24/32/48/56) pin k so each rule folds the pair into the add's
// extended-register form (sxtb/sxth/sxtw/uxtb).

// int: src2 sign-extended from 16 bits (k = 16).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src2 sign-extended from 8 bits (k = 24).
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: src2 zero-extended from 8 bits (k = 24, unsigned right shift).
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src2 sign-extended from 16 bits (k = 48).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src2 sign-extended from 32 bits (k = 32).
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src2 sign-extended from 8 bits (k = 56).
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: src2 zero-extended from 8 bits (k = 56, unsigned right shift).
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12363 
12364 
// ---- Add/subtract with zero-extension expressed as an AND mask ----
// A zero-extension can also appear as (src2 & 0xff / 0xffff / 0xffffffff).
// The fixed-mask operand types pin the mask value, so each rule folds the
// AND into the add/sub extended-register form (uxtb/uxth/uxtw).

// int: dst = src1 + (src2 & 0xff)
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 + (src2 & 0xffff)
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (src2 & 0xff)
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (src2 & 0xffff)
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 + (src2 & 0xffffffff)
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 - (src2 & 0xff)
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// int: dst = src1 - (src2 & 0xffff)
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 - (src2 & 0xff)
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 - (src2 & 0xffff)
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// long: dst = src1 - (src2 & 0xffffffff)
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12494 
12495 // END This section of the file is automatically generated. Do not edit --------------
12496 
12497 // ============================================================================
12498 // Floating Point Arithmetic Instructions
12499 
// Single-precision FP add: fadds.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12514 
// Double-precision FP add: faddd.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12529 
// Single-precision FP subtract: fsubs.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12544 
// Double-precision FP subtract: fsubd.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12559 
// Single-precision FP multiply: fmuls.
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
12574 
// Double-precision FP multiply: fmuld.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12589 
// We cannot use these fused mul with add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
12595 
12596 
12597 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12598 //   match(Set dst (AddF (MulF src1 src2) src3));
12599 
12600 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12601 
12602 //   ins_encode %{
12603 //     __ fmadds(as_FloatRegister($dst$$reg),
12604 //              as_FloatRegister($src1$$reg),
12605 //              as_FloatRegister($src2$$reg),
12606 //              as_FloatRegister($src3$$reg));
12607 //   %}
12608 
12609 //   ins_pipe(pipe_class_default);
12610 // %}
12611 
12612 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12613 //   match(Set dst (AddD (MulD src1 src2) src3));
12614 
12615 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12616 
12617 //   ins_encode %{
12618 //     __ fmaddd(as_FloatRegister($dst$$reg),
12619 //              as_FloatRegister($src1$$reg),
12620 //              as_FloatRegister($src2$$reg),
12621 //              as_FloatRegister($src3$$reg));
12622 //   %}
12623 
12624 //   ins_pipe(pipe_class_default);
12625 // %}
12626 
12627 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12628 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12629 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12630 
12631 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12632 
12633 //   ins_encode %{
12634 //     __ fmsubs(as_FloatRegister($dst$$reg),
12635 //               as_FloatRegister($src1$$reg),
12636 //               as_FloatRegister($src2$$reg),
12637 //              as_FloatRegister($src3$$reg));
12638 //   %}
12639 
12640 //   ins_pipe(pipe_class_default);
12641 // %}
12642 
12643 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12644 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12645 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12646 
12647 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12648 
12649 //   ins_encode %{
12650 //     __ fmsubd(as_FloatRegister($dst$$reg),
12651 //               as_FloatRegister($src1$$reg),
12652 //               as_FloatRegister($src2$$reg),
12653 //               as_FloatRegister($src3$$reg));
12654 //   %}
12655 
12656 //   ins_pipe(pipe_class_default);
12657 // %}
12658 
12659 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12660 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12661 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12662 
12663 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12664 
12665 //   ins_encode %{
12666 //     __ fnmadds(as_FloatRegister($dst$$reg),
12667 //                as_FloatRegister($src1$$reg),
12668 //                as_FloatRegister($src2$$reg),
12669 //                as_FloatRegister($src3$$reg));
12670 //   %}
12671 
12672 //   ins_pipe(pipe_class_default);
12673 // %}
12674 
12675 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12676 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12677 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12678 
12679 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12680 
12681 //   ins_encode %{
12682 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12683 //                as_FloatRegister($src1$$reg),
12684 //                as_FloatRegister($src2$$reg),
12685 //                as_FloatRegister($src3$$reg));
12686 //   %}
12687 
12688 //   ins_pipe(pipe_class_default);
12689 // %}
12690 
12691 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12692 //   match(Set dst (SubF (MulF src1 src2) src3));
12693 
12694 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12695 
12696 //   ins_encode %{
12697 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12698 //                as_FloatRegister($src1$$reg),
12699 //                as_FloatRegister($src2$$reg),
12700 //                as_FloatRegister($src3$$reg));
12701 //   %}
12702 
12703 //   ins_pipe(pipe_class_default);
12704 // %}
12705 
12706 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12707 //   match(Set dst (SubD (MulD src1 src2) src3));
12708 
12709 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12710 
12711 //   ins_encode %{
12712 //   // n.b. insn name should be fnmsubd
12713 //     __ fnmsub(as_FloatRegister($dst$$reg),
12714 //                as_FloatRegister($src1$$reg),
12715 //                as_FloatRegister($src2$$reg),
12716 //                as_FloatRegister($src3$$reg));
12717 //   %}
12718 
12719 //   ins_pipe(pipe_class_default);
12720 // %}
12721 
12722 
// Single-precision FP divide: fdivs (higher cost; divide is not pipelined).
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
12737 
// Double-precision FP divide: fdivd (highest cost in this group).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12752 
// Single-precision FP negate: fnegs.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Format corrected from "fneg": the encoding emits fnegs (cf. negD's
  // "fnegd"), so debug/PrintAssembly output should show the real mnemonic.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12766 
// Double-precision FP negate: fnegd.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12780 
// Single-precision FP absolute value: fabss.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12793 
// Double-precision FP absolute value: fabsd.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12806 
// Double-precision square root: fsqrtd (executes in the FP divide unit).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Pipe corrected from fp_div_s: this is a double-precision op, so it
  // belongs on the double divide pipe (cf. divD_reg_reg); the s/d pipes
  // were swapped with sqrtF_reg.
  ins_pipe(fp_div_d);
%}
12819 
// Single-precision square root. C2 has no SqrtF node: float sqrt appears
// as ConvD2F(SqrtD(ConvF2D src)), which this rule collapses to one fsqrts.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Pipe corrected from fp_div_d: this is a single-precision op, so it
  // belongs on the single divide pipe (cf. divF_reg_reg); the s/d pipes
  // were swapped with sqrtD_reg.
  ins_pipe(fp_div_s);
%}
12832 
12833 // ============================================================================
12834 // Logical Instructions
12835 
12836 // Integer Logical Instructions
12837 
12838 // And Instructions
12839 
12840 
// Int bitwise AND, register-register: andw.
// NOTE(review): cr is declared but the rule neither matches nor KILLs
// flags -- confirm whether the rFlagsReg operand is actually needed.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12855 
// Int bitwise AND with a logical immediate: andw.
// NOTE(review): cr is declared but the rule neither matches nor KILLs
// flags -- confirm whether the rFlagsReg operand is actually needed.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // Format corrected from "andsw": the encoding emits the
  // non-flag-setting andw, so report the instruction actually generated.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12870 
12871 // Or Instructions
12872 
// Int bitwise OR, register-register: orrw.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12887 
// Int bitwise OR with a logical immediate: orrw.
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12902 
12903 // Xor Instructions
12904 
// Int bitwise XOR, register-register: eorw.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12919 
// Int bitwise XOR with a logical immediate: eorw.
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12934 
12935 // Long Logical Instructions
12936 // TODO
12937 
// Long bitwise AND, register-register: andr.
// NOTE(review): cr is declared but the rule neither matches nor KILLs
// flags -- confirm whether the rFlagsReg operand is actually needed.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format comment corrected from "# int": this rule operates on longs.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12952 
// Long bitwise AND with a logical immediate: andr.
// NOTE(review): cr is declared but the rule neither matches nor KILLs
// flags -- confirm whether the rFlagsReg operand is actually needed.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format comment corrected from "# int": this rule operates on longs.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12967 
12968 // Or Instructions
12969 
// Long bitwise OR, register-register: orr.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Format comment corrected from "# int": this rule operates on longs.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12984 
// Long bitwise OR with a logical immediate: orr.
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Format comment corrected from "# int": this rule operates on longs.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12999 
13000 // Xor Instructions
13001 
// Long bitwise XOR, register-register: eor.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Format comment corrected from "# int": this rule operates on longs.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
13016 
// Long bitwise XOR with a logical immediate: eor.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Format comment corrected from "# int": this rule operates on longs.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13031 
// Sign-extend int to long: sbfm dst, src, 0, 31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
13043 
// Zero-extend int to long: matches the (long)i & 0xFFFFFFFFL idiom,
// which occurs in bigmath (BigInteger-style) arithmetic; ubfm 0,31 is uxtw.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
13057 
// Truncate long to int: a 32-bit movw keeps only the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13070 
// Int to boolean: dst = (src != 0) ? 1 : 0 via cmpw + cset; clobbers flags.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13088 
// Pointer to boolean: dst = (src != null) ? 1 : 0 via 64-bit cmp + cset;
// clobbers flags.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13106 
// Narrow double to float: fcvtd.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
13119 
// Widen float to double: fcvts.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
13132 
// Float to int: fcvtzsw (signed convert, the 'z' = round toward zero).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
13145 
// Float to long: fcvtzs (signed convert, round toward zero).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
13158 
// Int to float: scvtfws (signed 32-bit source, single-precision result).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
13171 
// Long to float: scvtfs (signed 64-bit source, single-precision result).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
13184 
// Double to int: fcvtzdw (signed convert, round toward zero).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
13197 
// Double to long: fcvtzd (signed convert, round toward zero).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
13210 
// Int to double: scvtfwd (signed 32-bit source, double-precision result).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
13223 
// Long to double: scvtfd (signed 64-bit source, double-precision result).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13236 
13237 // stack <-> reg and reg <-> reg shuffles with no conversion
13238 
// Reinterpret float bits as int: reload the spilled float's stack slot
// into a GP register with a 32-bit load (no value conversion).
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
13256 
// Reinterpret int bits as float: reload the spilled int's stack slot
// into an FP register with a 32-bit FP load (no value conversion).
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13274 
// Reinterpret double bits as long: reload the spilled double's stack slot
// into a GP register with a 64-bit load (no value conversion).
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
13292 
// Reinterpret long bits as double: reload the spilled long's stack slot
// into an FP register with a 64-bit FP load (no value conversion).
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13310 
// Reinterpret float bits as int: store the FP register to the int stack
// slot with a 32-bit FP store (no value conversion).
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13328 
// Reinterpret int bits as float: store the GP register to the float stack
// slot with a 32-bit store (no value conversion).
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13346 
// Reinterpret double bits as long: store the FP register to the long stack
// slot with a 64-bit FP store (no value conversion).
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format operand order corrected from "strd $dst, $src": the encoding
  // stores $src to the stack slot $dst (cf. MoveF2I_reg_stack /
  // MoveL2D_reg_stack, which both print "$src, $dst").
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13364 
// Reinterpret long bits as double: store the GP register to the double
// stack slot with a 64-bit store (no value conversion).
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13382 
// Reinterpret float bits as int, register-to-register: fmovs FP -> GP.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}
13400 
// Reinterpret int bits as float, register-to-register: fmovs GP -> FP.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}
13418 
// Reinterpret double bits as long, register-to-register: fmovd FP -> GP.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}
13436 
// Reinterpret long bits as double, register-to-register: fmovd GP -> FP.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
13454 
13455 // ============================================================================
13456 // clearing of an array
13457 
// Zero an array via MacroAssembler::zero_words with a variable count.
// cnt/base are pinned to r11/r10 (per the operand classes) and declared
// USE_KILL because zero_words consumes them.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, $cnt$$Register);
  %}

  ins_pipe(pipe_class_memory);
%}
13472 
// Zero an array via MacroAssembler::zero_words with a compile-time
// constant word count.
// NOTE(review): unlike clearArray_reg_reg there is no USE_KILL effect on
// $base (and cr is declared but unused) -- confirm that the constant-count
// zero_words(Register, u_int64_t) overload leaves $base intact.
instruct clearArray_imm_reg(immL cnt, iRegP base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
13486 
13487 // ============================================================================
13488 // Overflow Math Instructions
13489 
// OverflowAddI: set flags from op1 + op2 (cmnw discards the sum); the
// Bool user tests overflow/no_overflow on the resulting V flag.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
13502 
// OverflowAddI with an add/sub immediate: flags from cmnw op1, #imm.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13515 
// OverflowAddL: set flags from the 64-bit op1 + op2 (cmn discards the sum).
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
13528 
// OverflowAddL with an add/sub immediate: flags from cmn op1, #imm.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13541 
// OverflowSubI: set flags from op1 - op2 (cmpw discards the difference).
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
13554 
// OverflowSubI with an add/sub immediate: flags from cmpw op1, #imm.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13567 
// OverflowSubL: set flags from the 64-bit op1 - op2 (cmp discards the
// difference).
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}
13580 
// OverflowSubL with an add/sub immediate: flags from cmp op1, #imm.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13593 
// OverflowSubI with a zero left operand, i.e. the overflow check for int
// negation: flags from cmpw zr, op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13606 
// OverflowSubL with a zero left operand, i.e. the overflow check for long
// negation: flags from cmp zr, op1.
// NOTE(review): zero is declared immI0 although the matched node is the
// long-typed OverflowSubL -- verify this shouldn't be immL0 (cf. the
// immLAddSub operand used by overflowSubL_reg_imm).
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13619 
// OverflowMulI producing flags: do the multiply at 64 bits (smull), then
// synthesize the V flag -- the per-step comments below trace how a
// does-not-fit-in-32-bits result is turned into a VS condition.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
13640 
13641 instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
13642 %{
13643   match(If cmp (OverflowMulI op1 op2));
13644   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13645             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13646   effect(USE labl, KILL cr);
13647 
13648   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
13649             "cmp   rscratch1, rscratch1, sxtw\n\t"
13650             "b$cmp   $labl" %}
13651   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
13652   ins_encode %{
13653     Label* L = $labl$$label;
13654     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13655     __ smull(rscratch1, $op1$$Register, $op2$$Register);
13656     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
13657     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13658   %}
13659 
13660   ins_pipe(pipe_serial);
13661 %}
13662 
13663 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13664 %{
13665   match(Set cr (OverflowMulL op1 op2));
13666 
13667   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13668             "smulh rscratch2, $op1, $op2\n\t"
13669             "cmp   rscratch2, rscratch1, ASR #31\n\t"
13670             "movw  rscratch1, #0x80000000\n\t"
13671             "cselw rscratch1, rscratch1, zr, NE\n\t"
13672             "cmpw  rscratch1, #1" %}
13673   ins_cost(6 * INSN_COST);
13674   ins_encode %{
13675     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13676     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13677     __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
13678     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
13679     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
13680     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
13681   %}
13682 
13683   ins_pipe(pipe_slow);
13684 %}
13685 
13686 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
13687 %{
13688   match(If cmp (OverflowMulL op1 op2));
13689   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
13690             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
13691   effect(USE labl, KILL cr);
13692 
13693   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
13694             "smulh rscratch2, $op1, $op2\n\t"
13695             "cmp   rscratch2, rscratch1, ASR #31\n\t"
13696             "b$cmp $labl" %}
13697   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
13698   ins_encode %{
13699     Label* L = $labl$$label;
13700     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
13701     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
13702     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
13703     __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
13704     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
13705   %}
13706 
13707   ins_pipe(pipe_serial);
13708 %}
13709 
13710 // ============================================================================
13711 // Compare Instructions
13712 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an immediate that fits the add/sub-immediate
// encoding: a single cmpw instruction.
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate.  Costed at two
// instructions — presumably the constant is materialized first inside
// aarch64_enc_cmpw_imm (encoding defined elsewhere in this file).
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13768 
13769 // Unsigned compare Instructions; really, same as signed compare
13770 // except it should only be used to feed an If or a CMovI which takes a
13771 // cmpOpU.
13772 
// Unsigned int compare, register-register.  The emitted cmpw is the same
// as the signed form; only the flags-register class (rFlagsRegU) differs,
// steering consumers to unsigned condition codes.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (constant likely
// materialized first — see aarch64_enc_cmpw_imm).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13828 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
// NOTE(review): the operand is immI0 (int zero) paired with a CmpL, and the
// format text says "tst" while the encoding is a cmp-with-zero
// (aarch64_enc_cmp_imm_addsub) — looks like it should be immL0 and
// "cmp $op1, 0"; confirm against upstream aarch64.ad before changing.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant likely
// materialized first — see aarch64_enc_cmp_imm).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13884 
// Pointer compare, register-register.  Pointers compare unsigned, hence
// the rFlagsRegU flags class.
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13940 
13941 // FP comparisons
13942 //
13943 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
13944 // using normal cmpOp. See declaration of rFlagsReg for details.
13945 
// Float compare, register-register (fcmps sets NZCV from the FP compare).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13959 
// Float compare against literal 0.0 using the fcmp-with-zero form.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Was "0.0D": the 'D' suffix is Java syntax, not valid C++, and newer
    // compilers reject it ("invalid suffix on floating constant").
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
13973 // FROM HERE
13974 
// Double compare, register-register.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13988 
// Double compare against literal 0.0 using the fcmp-with-zero form.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Was "0.0D": the 'D' suffix is Java syntax, not valid C++, and newer
    // compilers reject it ("invalid suffix on floating constant").
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14002 
// Three-way float compare: dst = -1 if src1 < src2 or unordered,
// 0 if equal, 1 if src1 > src2 (csinv yields 0/-1 on EQ/NE, csneg then
// keeps -1 on LT/unordered or flips it to +1).
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (a previous "Label done"/bind pair was dead code — never branched to —
    // and has been removed)
  %}

  ins_pipe(pipe_class_default);

%}
14030 
// Three-way double compare: dst = -1 if src1 < src2 or unordered,
// 0 if equal, 1 if src1 > src2.  Same csinv/csneg sequence as the
// float form above.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (dead "Label done"/bind pair removed — it was never branched to)
  %}
  ins_pipe(pipe_class_default);

%}
14057 
// Three-way float compare against 0.0: dst = -1 (less/unordered), 0 (equal),
// 1 (greater).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Was "0.0D": Java-style suffix, not valid C++.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (dead "Label done"/bind pair removed — it was never branched to)
  %}

  ins_pipe(pipe_class_default);

%}
14084 
// Three-way double compare against 0.0: dst = -1 (less/unordered), 0 (equal),
// 1 (greater).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Was "0.0D": Java-style suffix, not valid C++.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // (dead "Label done"/bind pair removed — it was never branched to)
  %}
  ins_pipe(pipe_class_default);

%}
14110 
// CmpLTMask: dst = (p < q) ? -1 : 0.  Materialize the LT result as 0/1
// with csetw, then negate to spread it into an all-ones/all-zeros mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic right shift by 31 replicates the
// sign bit, yielding -1 for negative src and 0 otherwise in one insn.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14147 
14148 // ============================================================================
14149 // Max and Min
14150 
// Signed int minimum via compare + conditional select (cselw on LT).
// NOTE(review): the format strings omit commas between operands
// ("$src1 $src2") — cosmetic only; the emitted code is correct.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 lt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
// FROM HERE

// Signed int maximum via compare + conditional select (cselw on GT).
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1 $src2\t signed int\n\t"
    "cselw $dst, $src1, $src2 gt\t"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
14201 
14202 // ============================================================================
14203 // Branch Instructions
14204 
14205 // Direct Branch.
// Unconditional direct branch.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional Near Branch Unsigned
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
14261 
14262 // Make use of CBZ and CBNZ.  These instructions, as well as being
14263 // shorter than (cmp; branch), have the additional benefit of not
14264 // killing the flags.
14265 
// Compare-against-zero-and-branch, int: fuses (CmpI op1, 0) + eq/ne branch
// into a single cbzw/cbnzw, leaving the flags untouched.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// As above for long: cbz/cbnz on the 64-bit register.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// As above for pointer null check.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// As above for compressed-oop null check (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check of a DecodeN'd narrow oop: the decoded pointer is null iff the
// narrow oop is zero, so test the 32-bit register directly and skip the
// decode.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14360 
// Unsigned compare-against-zero-and-branch, int.  Also accepts gt/le:
// unsigned "u > 0" is equivalent to "!= 0" and "u <= 0" to "== 0",
// so EQ/LS map to cbzw and the rest to cbnzw.
instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            ||  n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// As above for the 64-bit register form.
instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14402 
14403 // Test bit and Branch
14404 
14405 // Patterns for short (< 32KiB) variants
// Sign test and branch, long: "x < 0" / "x >= 0" reduce to testing the
// sign bit (bit 63) with tbnz/tbz (tbr dispatches on cond: NE -> tbnz,
// EQ -> tbz — see MacroAssembler::tbr).  Short variant, range < 32KiB.
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test and branch, int: test bit 31.
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch, long: ((op1 & 2^k) ==/!= 0) becomes
// tbz/tbnz on bit k when the mask is a power of two.
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test and branch, int.
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14479 
14480 // And far variants
// Far variants of the bit-test branches above: same tbz/tbnz selection but
// with far=true so MacroAssembler handles targets beyond the tbz range.
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far sign test and branch, int (bit 31).
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test and branch, long.
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test and branch, int.
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14550 
14551 // Test bits
14552 
// (AndL op1 op2) compared against 0 collapses to a tst (ands into zr) when
// the mask is encodable as a 64-bit logical immediate.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit variant of the above, using tstw and the 32-bit logical-immediate
// encoding check.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register tst for (AndL op1 op2) against 0.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register tstw for (AndI op1 op2) against 0.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14600 
14601 
14602 // Conditional Far Branch
14603 // Conditional Far Branch Unsigned
14604 // TODO: fixme
14605 
14606 // counted loop end branch near
// Signed conditional branch at the back edge of a counted loop
// (near/short-range form; far variant not yet provided — see TODO above).
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14622 
14623 // counted loop end branch near Unsigned
// Unsigned conditional branch at the back edge of a counted loop (near form);
// differs from branchLoopEnd only in using unsigned condition codes.
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14639 
14640 // counted loop end branch far
14641 // counted loop end branch far unsigned
14642 // TODO: fixme
14643 
14644 // ============================================================================
14645 // inlined locking and unlocking
14646 
// Inlined monitor-enter fast path; flags (cr) carry the lock/contended result.
// tmp and tmp2 are scratch registers clobbered by the encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14661 
// Inlined monitor-exit fast path, mirroring cmpFastLock; tmp/tmp2 are scratch.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14674 
14675 
14676 // ============================================================================
14677 // Safepoint Instructions
14678 
14679 // TODO
14680 // provide a near and far version of this code
14681 
// Safepoint poll: load from the polling page; the read faults when the VM has
// protected the page, trapping the thread into the safepoint handler.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14694 
14695 
14696 // ============================================================================
14697 // Procedure Call/Return Instructions
14698 
14699 // Call Java Static Instruction
14700 
// Direct (statically bound) Java call; epilog encoding handles the
// post-call bookkeeping.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14716 
14717 // TO HERE
14718 
14719 // Call Java Dynamic Instruction
// Dynamically dispatched Java call (inline-cache based); otherwise parallels
// CallStaticJavaDirect.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14735 
14736 // Call Runtime Instruction
14737 
// Call into the VM runtime (full Java-to-runtime transition).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14752 
14753 // Call Runtime Instruction
14754 
// Leaf runtime call (no safepoint/oop-map machinery needed by the callee);
// uses the same encoding as CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14769 
14770 // Call Runtime Instruction
14771 
// Leaf runtime call that does not use/kill floating-point state;
// encoding is identical to the other runtime-call forms.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14786 
14787 // Tail Call; Jump from runtime stub to Java code.
14788 // Also known as an 'interprocedural jump'.
14789 // Target of jump will eventually return to caller.
14790 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code; method_oop is
// carried in the inline-cache register for the callee's use.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14803 
// Tail jump used for exception forwarding: removes the return address and
// jumps with the exception oop pinned in r0.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14816 
14817 // Create exception oop: created by stack-crawling runtime code.
14818 // Created exception is now available to this handler, and is setup
14819 // just prior to jumping to this handler. No code emitted.
14820 // TODO check
14821 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Zero-size pseudo-instruction: tells the register allocator the exception
// oop materializes in r0 at handler entry; emits no code.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14834 
14835 // Rethrow exception: The exception oop will come in the first
14836 // argument position. Then JUMP (not call) to the rethrow stub code.
// Jump (not call) to the shared rethrow stub; exception oop arrives in the
// first argument register per the comment above.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14847 
14848 
14849 // Return Instruction
14850 // epilog node loads ret address into lr as part of frame pop
// Method return; the frame-pop epilog has already restored lr.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14861 
14862 // Die now.
// Halt node: emit a breakpoint (brk #999) so falling into supposedly
// unreachable code traps immediately.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14877 
14878 // ============================================================================
14879 // Partial Subtype Check
14880 //
// Search for the super klass in the secondary-supers (superklass)
// array for an instance of the subklass.  Set a hidden
14882 // internal cache on a hit (cache is checked with exposed code in
14883 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14884 // encoding ALSO sets flags.
14885 
// Partial subtype check returning a result register (zero on hit, non-zero
// on miss); also sets flags. Registers are pinned (r4/r0/r2/r5) to match the
// shared stub's calling convention.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14900 
// Variant matched when the subtype-check result is only compared against
// zero: result register need not be zeroed on hit, only flags matter.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14915 
// String.compareTo intrinsic (non-compact strings only). Counts arrive in
// bytes and are halved in place to char counts before calling the stub.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt1$$Register, $cnt1$$Register, 1);
    __ asrw($cnt2$$Register, $cnt2$$Register, 1);
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14934 
// String.indexOf intrinsic, variable-length needle (non-compact strings).
// The -1 icnt2 argument tells the macro-assembler the needle length is
// runtime-supplied in cnt2.
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14953 
// String.indexOf intrinsic specialized for a small constant-length needle
// (immI_le_4); the constant count is passed to the stub and zr replaces cnt2.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14974 
// String.equals intrinsic (non-compact strings): byte count is halved to a
// char count, then delegated to arrays_equals with 2-byte elements.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ arrays_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register,
                     2, /*is_string*/true);
  %}
  ins_pipe(pipe_class_memory);
%}
14992 
// Arrays.equals intrinsic for byte[] (Latin-1 encoding, element size 1).
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     1, /*is_string*/false);
    %}
  ins_pipe(pipe_class_memory);
%}
15008 
// Arrays.equals intrinsic for char[] (UTF-16 encoding, element size 2).
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $result$$Register, $tmp$$Register,
                     2, /*is_string*/false);
  %}
  ins_pipe(pipe_class_memory);
%}
15024 
15025 
15026 // encode char[] to byte[] in ISO_8859_1
// ISO-8859-1 char[]-to-byte[] encode intrinsic; result is the number of
// characters encoded. Clobbers four SIMD temporaries (v0-v3) used by the stub.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
15044 
15045 // ============================================================================
15046 // This name is KNOWN by the ADLC and cannot be changed.
15047 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
15048 // for this guy.
// Zero-size pseudo-instruction: the current thread already lives in the
// dedicated thread register, so ThreadLocal needs no code.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
15063 
15064 // ====================VECTOR INSTRUCTIONS=====================================
15065 
15066 // Load vector (32 bits)
// 32-bit vector load into the low S lane of a D register.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15076 
15077 // Load vector (64 bits)
// 64-bit vector load (full D register).
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15087 
15088 // Load Vector (128 bits)
// 128-bit vector load (full Q register).
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15098 
15099 // Store Vector (32 bits)
// 32-bit vector store from the low S lane of a D register.
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15109 
15110 // Store Vector (64 bits)
// 64-bit vector store (full D register).
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15120 
15121 // Store Vector (128 bits)
// 128-bit vector store (full Q register).
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15131 
// Broadcast a GP register byte into all lanes of a 64-bit vector;
// also covers the 4-byte case (predicate accepts length 4 or 8).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15144 
// Broadcast a GP register byte into all 16 lanes of a 128-bit vector.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15156 
// Broadcast an immediate byte (masked to 8 bits) into a 64-bit vector.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15169 
// Broadcast an immediate byte (masked to 8 bits) into a 128-bit vector.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15181 
// Broadcast a GP register short into a 64-bit vector (2 or 4 half-word lanes).
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15194 
// Broadcast a GP register short into all 8 half-word lanes of a 128-bit vector.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15206 
// Broadcast an immediate short (masked to 16 bits) into a 64-bit vector.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15219 
// Broadcast an immediate short (masked to 16 bits) into a 128-bit vector.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15231 
// Broadcast a GP register int into both 32-bit lanes of a 64-bit vector.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15243 
// Broadcast a GP register int into all four 32-bit lanes of a 128-bit vector.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15255 
// Broadcast an immediate int into both 32-bit lanes of a 64-bit vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15267 
// Broadcast an immediate int into all four 32-bit lanes of a 128-bit vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15279 
// Broadcast a GP register long into both 64-bit lanes of a 128-bit vector.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15291 
// Zero a 128-bit vector by EORing it with itself (cheaper than a broadcast).
// NOTE(review): this instruct matches ReplicateI zero (not ReplicateL) and the
// format label says "4I"; an all-zero 128-bit pattern is identical for both
// element widths, but confirm the ReplicateI match is intentional.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15305 
// Broadcast a float register into both 32-bit lanes of a 64-bit vector.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15318 
// Broadcast a float register into all four 32-bit lanes of a 128-bit vector.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15331 
// Broadcast a double register into both 64-bit lanes of a 128-bit vector.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15344 
15345 // ====================REDUCTION ARITHMETIC====================================
15346 
// Add-reduce a 2-lane int vector: dst = src1 + src2[0] + src2[1].
// Lanes are moved to GP registers (umov) and summed with scalar adds.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15365 
// Add-reduce a 4-lane int vector: ADDV sums all lanes into lane 0, which is
// moved to a GP register and added to the scalar src1.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15383 
// Multiply-reduce a 2-lane int vector: dst = src1 * src2[0] * src2[1].
// Each 32-bit lane is extracted with umov and folded in with a scalar mul.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fix: drop the stray trailing "\n\t" so the debug listing does not end
  // with an empty continuation line.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15402 
// Multiply-reduce a 4-lane int vector: pairwise multiply the high and low
// halves with a 2S vector mul, then fold the two remaining lanes and the
// scalar src1 with scalar muls.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Fix: drop the stray trailing "\n\t" so the debug listing does not end
  // with an empty continuation line.
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    // Copy the high D half of src2 into tmp's low half ...
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    // ... then multiply element-wise against the low half.
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15427 
// Add-reduce a 2-lane float vector: dst = src1 + src2[0] + src2[1].
// Lane 1 is shuffled into tmp's lane 0 with ins, then folded in with fadds.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15447 
// Add-reduce a 4-lane float vector: strictly-ordered scalar fadds chain
// (lane by lane) to preserve IEEE-754 addition order.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15479 
// Multiply-reduce a 2-lane float vector: dst = src1 * src2[0] * src2[1].
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fix: format label previously said "add reduction4f" for this 2-lane
  // multiply reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15499 
// Multiply-reduce a 4-lane float vector: strictly-ordered scalar fmuls chain
// (lane by lane) to preserve IEEE-754 multiplication order.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Fix: format label previously said "add reduction4f" for this multiply
  // reduction.
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15531 
// Add-reduce a 2-element double vector into a scalar:
// dst = src1 + src2[0] + src2[1].  $tmp is clobbered to hold the
// extracted high lane.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15551 
// Multiply-reduce a 2-element double vector into a scalar:
// dst = src1 * src2[0] * src2[1].  $tmp is clobbered to hold the
// extracted high lane.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Annotation fixed: multiply reduction, not "add reduction2d".
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15571 
15572 // ====================VECTOR ARITHMETIC=======================================
15573 
15574 // --------------------------------- ADD --------------------------------------
15575 
// Vector addition.  Integer lanes use the SIMD "addv" form, float/double
// lanes use "fadd"; the arrangement (__ T8B, __ T4H, ...) selects lane
// size and count.  64-bit (vecD) rules also cover shorter vectors via
// their length predicates.

// 8 (or 4) byte lanes in a 64-bit register.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 16 byte lanes in a 128-bit register.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 4 (or 2) short lanes in a 64-bit register.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 8 short lanes in a 128-bit register.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2 int lanes in a 64-bit register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// 4 int lanes in a 128-bit register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2 long lanes in a 128-bit register.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// 2 float lanes in a 64-bit register.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// 4 float lanes in a 128-bit register.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15703 
// 2 double lanes in a 128-bit register.
// Length predicate added for consistency with vsub2D/vmul2D/vdiv2D;
// an AddVD held in a vecX can only have length 2, so matching behavior
// is unchanged.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15716 
15717 // --------------------------------- SUB --------------------------------------
15718 
// Vector subtraction.  Integer lanes use "subv", float/double lanes use
// "fsub".  Mirrors the ADD rules above: vecD forms with multi-length
// predicates also cover the shorter vectors.

instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15860 
15861 // --------------------------------- MUL --------------------------------------
15862 
// Vector multiplication.  Integer lanes use "mulv", float/double lanes
// use "fmul".  Note there are no MulVB (byte) or MulVL (long) rules in
// this section.  FP multiplies use the muldiv pipe classes; integer
// multiplies use vmul64/vmul128.

instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15961 
15962 // --------------------------------- MLA --------------------------------------
15963 
// Multiply-accumulate: matches (AddV dst (MulV src1 src2)) where the
// accumulator is the destination, so "mlav" performs
// dst += src1 * src2 lane-wise.  Integer lane sizes only.

instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16020 
16021 // --------------------------------- MLS --------------------------------------
16022 
// Multiply-subtract: matches (SubV dst (MulV src1 src2)), so "mlsv"
// performs dst -= src1 * src2 lane-wise.  Integer lane sizes only;
// shares the vmla pipe classes.

instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
16079 
16080 // --------------------------------- DIV --------------------------------------
16081 
// Vector division — floating point only ("fdiv"); AArch64 SIMD has no
// integer vector divide.

instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16123 
16124 // --------------------------------- SQRT -------------------------------------
16125 
// Vector square root — only the 2D (double) form is provided here.
// No ins_cost is specified, so the default cost applies.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16137 
16138 // --------------------------------- ABS --------------------------------------
16139 
// Vector absolute value — floating point lanes via SIMD "fabs".
// Cost is INSN_COST * 3 for every form.

instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16178 
16179 // --------------------------------- NEG --------------------------------------
16180 
// Vector negation — floating point lanes via SIMD "fneg".
// Cost is INSN_COST * 3 for every form, matching the ABS rules.

instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16219 
16220 // --------------------------------- AND --------------------------------------
16221 
// Bitwise AND.  Logical ops are element-size agnostic, so the
// predicates test length_in_bytes rather than lane count, and the
// arrangement is always T8B/T16B.

instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16250 
16251 // --------------------------------- OR ---------------------------------------
16252 
// Bitwise OR, 8-byte (and 4-byte) vectors.  Format string fixed: it
// said "and" but the emitted instruction is "orr" (copy-paste from the
// AND rule; vor16B below already says "orr").
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16267 
// Bitwise OR, 16-byte vectors.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16281 
16282 // --------------------------------- XOR --------------------------------------
16283 
// Bitwise XOR — printed as "xor" in the format but emitted as the
// AArch64 "eor" instruction.

instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16312 
16313 // ------------------------------ Shift ---------------------------------------
16314 
// Broadcast a scalar shift count into every byte lane of a vector
// register, for use by the variable vector shift rules below.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// (sshl/ushl with a negative count), so the count is negated after the dup.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16334 
// Variable-count byte vector shifts.  The left-shift rules also match
// RShiftVB because a right shift is a left shift by a negated count
// (the count vector comes from vshiftcntR above).  Unsigned right
// shifts use "ushl" instead.

instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16390 
// Vector left shift of byte lanes by a constant.  The count is masked
// with & 31 (Java int-shift semantics); a count of 8..31 shifts every
// bit out of an 8-bit lane, so the result is produced as zero via
// EOR dst,src,src instead of an (unencodable) SHL.

// 8B form; also covers length-4 byte vectors.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    // Mask to Java shift range, then zero the vector if the whole
    // lane would be shifted out.
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 16B form: full 128-bit vector, same count handling as 8B.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16429 
// Vector arithmetic right shift of byte lanes by a constant.  The count
// is masked with & 31 (Java int-shift semantics) and clamped to 7: an
// arithmetic shift by >= lane width fills the lane with the sign bit,
// which SSHR by 7 achieves for bytes.
// NOTE(review): the subsequent "sh = -sh & 7" negate-and-mask suggests
// this file's sshr wrapper takes the count in pre-encoded (negated)
// form -- confirm against the assembler's sshr definition before
// changing anything here.

// 8B form; also covers length-4 byte vectors.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 16B form: full 128-bit vector, same count handling as 8B.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16460 
// Vector logical right shift of byte lanes by a constant.  Counts >= 8
// shift every bit out of an 8-bit lane, so the result is zeroed with
// EOR dst,src,src; otherwise USHR is used.
// NOTE(review): the "-sh & 7" passed to ushr mirrors the negated
// encoding used by the sshr rules above -- confirm against the
// assembler's ushr definition.

// 8B form; also covers length-4 byte vectors.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 16B form: full 128-bit vector, same count handling as 8B.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16499 
// Vector shifts of short (16-bit) lanes by a variable (register) count.
// SSHL shifts left for positive counts and right for negative ones, so
// one rule matches both LShiftVS and RShiftVS; for the right-shift use
// the $shift register is expected to already hold negated counts.
// NOTE(review): confirm the vector shift-count rule negates for right
// shifts -- that preparation is outside this view.

// 4H form; also covers length-2 short vectors.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 8H form: full 128-bit vector of 8 short lanes.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical (unsigned) right shift of short lanes by register: USHL with
// negated counts, as for the byte forms above.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16555 
// Vector left shift of short (16-bit) lanes by a constant.  The count
// is masked with & 31 (Java int-shift semantics); a count of 16..31
// shifts every bit out of a 16-bit lane, so the result is zeroed with
// EOR dst,src,src.

// 4H form; also covers length-2 short vectors.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      // Whole lane shifted out: produce zero (EOR of a register with
      // itself; T8B arrangement is fine for a pure zeroing idiom).
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 8H form: full 128-bit vector, same count handling as 4H.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16594 
// Vector arithmetic right shift of short (16-bit) lanes by a constant.
// The count is masked with & 31 and clamped to 15 (sign-fill for
// counts >= lane width), then negated and masked before being passed
// to sshr.
// NOTE(review): as with the byte forms, "sh = -sh & 15" implies the
// sshr wrapper expects a pre-encoded (negated) count -- confirm against
// the assembler definition.

// 4H form; also covers length-2 short vectors.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// 8H form: full 128-bit vector, same count handling as 4H.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16625 
// Vector logical right shift of short (16-bit) lanes by a constant.
// Counts >= 16 empty the lane, so the result is zeroed with EOR;
// otherwise USHR with the negated/masked count (same encoding
// convention as the sshr rules above -- see NOTE there).

// 4H form; also covers length-2 short vectors.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// 8H form: full 128-bit vector, same count handling as 4H.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16664 
// Vector shifts of int (32-bit) lanes by a variable (register) count.
// As with the short forms: SSHL serves both LShiftVI and RShiftVI
// (negative counts shift right), USHL serves URShiftVI; for right
// shifts the $shift register is expected to already hold negated
// counts (prepared outside this view -- confirm).

// 2S form: 64-bit vector of 2 int lanes.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// 4S form: 128-bit vector of 4 int lanes.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical (unsigned) right shift of int lanes by register.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16718 
// Vector left shift of int (32-bit) lanes by a constant.  The & 31
// mask matches Java int-shift semantics and also keeps the count in
// the encodable 0..31 range, so no zeroing special case is needed
// (unlike the byte/short forms above).

// 2S form: 64-bit vector of 2 int lanes.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// 4S form: 128-bit vector of 4 int lanes.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16744 
// Vector arithmetic right shift of int (32-bit) lanes by a constant.
// The count is passed negated and masked ("-(c) & 31"), the same
// pre-encoded convention used by the byte/short sshr rules above.
// NOTE(review): confirm this matches the assembler's sshr expectation.

// 2S form: 64-bit vector of 2 int lanes.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// 4S form: 128-bit vector of 4 int lanes.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16770 
// Vector logical right shift of int (32-bit) lanes by a constant.
// Count is negated and masked ("-(c) & 31") -- same pre-encoded
// convention as the sshr rules above.

// 2S form: 64-bit vector of 2 int lanes.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}

// 4S form: 128-bit vector of 4 int lanes.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16796 
// Vector shifts of long (64-bit) lanes by a variable (register) count.
// SSHL serves both LShiftVL and RShiftVL (negative counts shift
// right); USHL serves URShiftVL.  For right shifts the $shift register
// is expected to already hold negated counts (prepared outside this
// view -- confirm).

// 2D form: 128-bit vector of 2 long lanes (only vecX size exists).
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Logical (unsigned) right shift of long lanes by register.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16823 
// Vector shifts of long (64-bit) lanes by a constant.  The & 63 mask
// matches Java long-shift semantics and keeps the count encodable, so
// no zeroing/clamping special case is needed.  The right-shift forms
// pass the count negated and masked ("-(c) & 63"), the same
// pre-encoded convention as the int forms above.
// NOTE(review): confirm against the assembler's sshr/ushr definitions.

// Left shift, 2D form.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}

// Arithmetic right shift, 2D form.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}

// Logical right shift, 2D form.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
16862 
16863 //----------PEEPHOLE RULES-----------------------------------------------------
16864 // These must follow all instruction definitions as they use the names
16865 // defined in the instructions definitions.
16866 //
16867 // peepmatch ( root_instr_name [preceding_instruction]* );
16868 //
16869 // peepconstraint %{
16870 // (instruction_number.operand_name relational_op instruction_number.operand_name
16871 //  [, ...] );
16872 // // instruction numbers are zero-based using left to right order in peepmatch
16873 //
16874 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16875 // // provide an instruction_number.operand_name for each operand that appears
16876 // // in the replacement instruction's match rule
16877 //
16878 // ---------VM FLAGS---------------------------------------------------------
16879 //
16880 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16881 //
16882 // Each peephole rule is given an identifying number starting with zero and
16883 // increasing by one in the order seen by the parser.  An individual peephole
16884 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16885 // on the command-line.
16886 //
16887 // ---------CURRENT LIMITATIONS----------------------------------------------
16888 //
16889 // Only match adjacent instructions in same basic block
16890 // Only equality constraints
16891 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16892 // Only one replacement instruction
16893 //
16894 // ---------EXAMPLE----------------------------------------------------------
16895 //
16896 // // pertinent parts of existing instructions in architecture description
16897 // instruct movI(iRegINoSp dst, iRegI src)
16898 // %{
16899 //   match(Set dst (CopyI src));
16900 // %}
16901 //
16902 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16903 // %{
16904 //   match(Set dst (AddI dst src));
16905 //   effect(KILL cr);
16906 // %}
16907 //
16908 // // Change (inc mov) to lea
16909 // peephole %{
//   // increment preceded by register-register move
16911 //   peepmatch ( incI_iReg movI );
16912 //   // require that the destination register of the increment
16913 //   // match the destination register of the move
16914 //   peepconstraint ( 0.dst == 1.dst );
16915 //   // construct a replacement instruction that sets
16916 //   // the destination to ( move's source register + one )
16917 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16918 // %}
16919 //
16920 
16921 // Implementation no longer uses movX instructions since
16922 // machine-independent system no longer uses CopyX nodes.
16923 //
16924 // peephole
16925 // %{
16926 //   peepmatch (incI_iReg movI);
16927 //   peepconstraint (0.dst == 1.dst);
16928 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16929 // %}
16930 
16931 // peephole
16932 // %{
16933 //   peepmatch (decI_iReg movI);
16934 //   peepconstraint (0.dst == 1.dst);
16935 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16936 // %}
16937 
16938 // peephole
16939 // %{
16940 //   peepmatch (addI_iReg_imm movI);
16941 //   peepconstraint (0.dst == 1.dst);
16942 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16943 // %}
16944 
16945 // peephole
16946 // %{
16947 //   peepmatch (incL_iReg movL);
16948 //   peepconstraint (0.dst == 1.dst);
16949 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16950 // %}
16951 
16952 // peephole
16953 // %{
16954 //   peepmatch (decL_iReg movL);
16955 //   peepconstraint (0.dst == 1.dst);
16956 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16957 // %}
16958 
16959 // peephole
16960 // %{
16961 //   peepmatch (addL_iReg_imm movL);
16962 //   peepconstraint (0.dst == 1.dst);
16963 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16964 // %}
16965 
16966 // peephole
16967 // %{
16968 //   peepmatch (addP_iReg_imm movP);
16969 //   peepconstraint (0.dst == 1.dst);
16970 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16971 // %}
16972 
16973 // // Change load of spilled value to only a spill
16974 // instruct storeI(memory mem, iRegI src)
16975 // %{
16976 //   match(Set mem (StoreI mem src));
16977 // %}
16978 //
16979 // instruct loadI(iRegINoSp dst, memory mem)
16980 // %{
16981 //   match(Set dst (LoadI mem));
16982 // %}
16983 //
16984 
16985 //----------SMARTSPILL RULES---------------------------------------------------
16986 // These must follow all instruction definitions as they use the names
16987 // defined in the instructions definitions.
16988 
16989 // Local Variables:
16990 // mode: c++
16991 // End: