1 //
   2 // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 //
   6 // This code is free software; you can redistribute it and/or modify it
   7 // under the terms of the GNU General Public License version 2 only, as
   8 // published by the Free Software Foundation.
   9 //
  10 // This code is distributed in the hope that it will be useful, but WITHOUT
  11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13 // version 2 for more details (a copy is included in the LICENSE file that
  14 // accompanied this code).
  15 //
  16 // You should have received a copy of the GNU General Public License version
  17 // 2 along with this work; if not, write to the Free Software Foundation,
  18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 //
  20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21 // or visit www.oracle.com if you need additional information or have any
  22 // questions.
  23 //
  24 //
  25 
  26 // AArch64 Architecture Description File
  27 
  28 //----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
  32 
  33 register %{
  34 //----------Architecture Description Register Definitions----------------------
  35 // General Registers
  36 // "reg_def"  name ( register save type, C convention save type,
  37 //                   ideal register type, encoding );
  38 // Register Save Types:
  39 //
  40 // NS  = No-Save:       The register allocator assumes that these registers
  41 //                      can be used without saving upon entry to the method, &
  42 //                      that they do not need to be saved at call sites.
  43 //
  44 // SOC = Save-On-Call:  The register allocator assumes that these registers
  45 //                      can be used without saving upon entry to the method,
  46 //                      but that they must be saved at call sites.
  47 //
  48 // SOE = Save-On-Entry: The register allocator assumes that these registers
  49 //                      must be saved before using them upon entry to the
  50 //                      method, but they do not need to be saved at call
  51 //                      sites.
  52 //
  53 // AS  = Always-Save:   The register allocator assumes that these registers
  54 //                      must be saved before using them upon entry to the
  55 //                      method, & that they must be saved at call sites.
  56 //
  57 // Ideal Register Type is used to determine how to save & restore a
  58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  60 //
  61 // The encoding number is the actual bit-pattern placed into the opcodes.
  62 
  63 // We must define the 64 bit int registers in two 32 bit halves, the
  64 // real lower register and a virtual upper half register. upper halves
  65 // are used by the register allocator but are not actually supplied as
  66 // operands to memory ops.
  67 //
  68 // follow the C1 compiler in making registers
  69 //
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// as regards Java usage. We don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
  77 //
  78 
  79 // General Registers
  80 
// 64-bit general registers, each defined as a real low half (Rn) and a
// virtual high half (Rn_H) as described above.  r8 and r9 are
// deliberately omitted so they stay invisible to the allocator and can
// be used as scratch registers.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: callee-saved under the C convention (SOE), caller-saved for
// Java (SOC) -- see the note above about not using Java callee saves.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: fixed-role system registers, never allocated for Java use.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 141 
 142 // ----------------------------
 143 // Float/Double Registers
 144 // ----------------------------
 145 
 146 // Double Registers
 147 
 148 // The rules of ADL require that double registers be defined in pairs.
 149 // Each pair must be two 32-bit values, but not necessarily a pair of
 150 // single float registers. In each pair, ADLC-assigned register numbers
 151 // must be adjacent, with the lower number even. Finally, when the
 152 // CPU stores such a register pair to memory, the word associated with
 153 // the lower ADLC-assigned number must be stored to the lower address.
 154 
 155 // AArch64 has 32 floating-point registers. Each can store a vector of
 156 // single or double precision floating-point values up to 8 * 32
 157 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 158 // use the first float or double element of the vector.
 159 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
 163 
  // Each 128-bit vector register vN is described to the allocator as
  // four 32-bit slices: VN (low word), VN_H, VN_J and VN_K, obtained
  // via successive VMReg next() offsets.
  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );

  reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );

  reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
  reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
  reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
  reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));

  reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
  reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
  reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
  reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));

  reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
  reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
  reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
  reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));

  reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
  reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
  reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
  reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));

  reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
  reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
  reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
  reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));

  reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
  reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
  reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
  reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));

  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
  reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
  reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));

  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
  reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
  reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));

  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
  reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
  reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));

  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
  reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
  reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));

  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
  reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
  reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));

  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
  reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
  reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));

  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
  reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
  reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));

  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
  reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
  reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));

  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
  reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
  reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));

  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
  reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
  reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));

  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
  reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
  reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));

  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
  reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
  reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));

  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
  reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
  reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));

  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
  reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
  reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));

  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
  reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
  reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));

  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
  reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
  reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 323 
 324 // ----------------------------
 325 // Special Registers
 326 // ----------------------------
 327 
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 333 
 334 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 335 
 336 
 337 // Specify priority of register selection within phases of register
 338 // allocation.  Highest priority is first.  A useful heuristic is to
 339 // give registers a low priority when they are required by machine
 340 // instructions, like EAX and EDX on I486, and choose no-save registers
 341 // before save-on-call, & save-on-call before save-on-entry.  Registers
 342 // which participate in fixed calling sequences should come last.
 343 // Registers which are used as pairs must fall on an even boundary.
 344 
// General-register allocation order: volatile temporaries first, then
// the Java argument registers, then callee-saved registers; the
// fixed-role system registers come last and are never allocated.
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 385 
// Float/vector allocation order: v16-v31 (no-save per the platform
// spec) first, then argument registers v0-v7, then v8-v15 which the
// platform ABI treats as callee save.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 426 
 427 alloc_class chunk2(RFLAGS);
 428 
 429 //----------Architecture Description Register Classes--------------------------
 430 // Several register classes are automatically defined based upon information in
 431 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 436 //
 437 
// Class for all 32 bit integer registers -- excludes SP (R31), which
// will never be used as an integer register
reg_class any_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30
);
 471 
// Singleton classes: each pins one specific 32-bit register for
// operands that must live in that register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);
 483 
// Class for all long integer registers (including SP/R31)
reg_class any_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 517 
// Class for all non-special integer registers (variant used when the
// frame pointer R29 is NOT available for allocation -- see the
// reg_class_dynamic no_special_reg32 below)
reg_class no_special_reg32_no_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
 /* R29, */                     // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 551 
// Same as no_special_reg32_no_fp except that R29 (fp) IS allocatable.
reg_class no_special_reg32_with_fp(
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,                        // rmethod
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26
 /* R27, */                     // heapbase
 /* R28, */                     // thread
    R29,                        // fp
 /* R30, */                     // lr
 /* R31 */                      // sp
);
 584 
 585 reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %});
 586 
// Class for all non-special long integer registers (variant used when
// the frame pointer R29 is NOT available for allocation)
reg_class no_special_reg_no_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 620 
// Same as no_special_reg_no_fp except that R29 (fp) IS allocatable.
reg_class no_special_reg_with_fp(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,                 // rmethod
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
    R29, R29_H,                 // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 653 
 654 reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %});
 655 
// Singleton 64-bit register classes: each pairs a register with its
// virtual high half, for operands pinned to a specific register.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for heapbase register
reg_class heapbase_reg(
    R27, R27_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
 725 
// Class for all pointer registers (includes the system registers
// r27-r31, unlike no_special_ptr_reg below)
reg_class ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
 759 
// Class for all non_special pointer registers (excludes the fixed-role
// system registers r27-r31)
reg_class no_special_ptr_reg(
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
 /* R27, R27_H, */              // heapbase
 /* R28, R28_H, */              // thread
 /* R29, R29_H, */              // fp
 /* R30, R30_H, */              // lr
 /* R31, R31_H */               // sp
);
 793 
// Class for all float registers (single-precision uses only the low
// 32-bit slice of each vector register)
reg_class float_reg(
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
 829 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 867 
// Class for all 64bit vector registers (low two 32-bit slices of each
// vector register)
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
 903 
// Class for all 128bit vector registers: four 32-bit slots per
// register (Vn plus virtual halves _H, _J, _K) so the allocator
// tracks the full 128-bit width
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
 939 
// Singleton classes pinning an operand to a specific vector register
// (used by rules that need a fixed register).
// NOTE(review): the comments say "128 bit" but only the Vn/Vn_H slot
// pair is listed (vectorx_reg above uses four slots per register) --
// confirm whether the _J/_K halves are deliberately omitted here.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);
 959 
// Singleton class for the condition code (flags) register
reg_class int_flags(RFLAGS);
 962 
 963 %}
 964 
 965 //----------DEFINITION BLOCK---------------------------------------------------
 966 // Define name --> value mappings to inform the ADLC of an integer valued name
 967 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 968 // Format:
 969 //        int_def  <name>         ( <int_value>, <expression>);
 970 // Generated Code in ad_<arch>.hpp
 971 //        #define  <name>   (<expression>)
 972 //        // value == <int_value>
 973 // Generated code in ad_<arch>.cpp adlc_verification()
 974 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 975 //
 976 
 977 // we follow the ppc-aix port in using a simple cost model which ranks
 978 // register operations as cheap, memory ops as more expensive and
 979 // branches as most expensive. the first two have a low as well as a
 980 // normal cost. huge cost appears to be a way of saying don't do
 981 // something
 982 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches and calls are ranked twice as expensive as a plain
  // register operation
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references are by far the most expensive: they imply
  // acquiring/releasing accesses or explicit dmb barriers (see the
  // volatile get/put discussion in the source block below)
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 990 
 991 
 992 //----------SOURCE BLOCK-------------------------------------------------------
 993 // This is a block of C++ code which provides values, functions, and
 994 // definitions necessary in the rest of the architecture description
 995 
 996 source_hpp %{
 997 
 998 #include "gc/shared/cardTableModRefBS.hpp"
 999 
// Trampoline-stub accounting queried by the compiler when shortening
// branches; AArch64 does not emit call trampoline stubs so both
// queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1017 
// Sizing and emission interface for the exception and deoptimization
// handler stubs (emitters are defined elsewhere; only their
// declarations appear here).
class HandlerImpl {

 public:

  // emit the handler code into cbuf
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: one far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    // NOTE(review): this reserves 4 instruction words although the
    // comment names two instructions -- presumably the far branch may
    // expand to multiple words; confirm against emit_deopt_handler
    return 4 * NativeInstruction::instruction_size;
  }
};
1034 
  // graph traversal helpers for the volatile put/get and CAS
  // optimization predicates (all defined in the source block below)

  MemBarNode *parent_membar(const Node *n);
  MemBarNode *child_membar(const MemBarNode *n);
  bool leading_membar(const MemBarNode *barrier);

  // card mark and CAS recognizers
  bool is_card_mark_membar(const MemBarNode *barrier);
  bool is_CAS(int opcode);

  // helpers that navigate between the leading, card mark and
  // trailing membars of a recognized volatile put or CAS subgraph
  MemBarNode *leading_to_normal(MemBarNode *leading);
  MemBarNode *normal_to_leading(const MemBarNode *barrier);
  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
  MemBarNode *trailing_to_leading(const MemBarNode *trailing);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
1066 %}
1067 
1068 source %{
1069 
  // Optimization of volatile gets and puts
1071   // -------------------------------------
1072   //
1073   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
1074   // use to implement volatile reads and writes. For a volatile read
1075   // we simply need
1076   //
1077   //   ldar<x>
1078   //
1079   // and for a volatile write we need
1080   //
1081   //   stlr<x>
1082   //
1083   // Alternatively, we can implement them by pairing a normal
1084   // load/store with a memory barrier. For a volatile read we need
1085   //
1086   //   ldr<x>
1087   //   dmb ishld
1088   //
1089   // for a volatile write
1090   //
1091   //   dmb ish
1092   //   str<x>
1093   //   dmb ish
1094   //
1095   // We can also use ldaxr and stlxr to implement compare and swap CAS
1096   // sequences. These are normally translated to an instruction
1097   // sequence like the following
1098   //
1099   //   dmb      ish
1100   // retry:
1101   //   ldxr<x>   rval raddr
1102   //   cmp       rval rold
1103   //   b.ne done
1104   //   stlxr<x>  rval, rnew, rold
1105   //   cbnz      rval retry
1106   // done:
1107   //   cset      r0, eq
1108   //   dmb ishld
1109   //
1110   // Note that the exclusive store is already using an stlxr
1111   // instruction. That is required to ensure visibility to other
1112   // threads of the exclusive write (assuming it succeeds) before that
1113   // of any subsequent writes.
1114   //
1115   // The following instruction sequence is an improvement on the above
1116   //
1117   // retry:
1118   //   ldaxr<x>  rval raddr
1119   //   cmp       rval rold
1120   //   b.ne done
1121   //   stlxr<x>  rval, rnew, rold
1122   //   cbnz      rval retry
1123   // done:
1124   //   cset      r0, eq
1125   //
1126   // We don't need the leading dmb ish since the stlxr guarantees
1127   // visibility of prior writes in the case that the swap is
1128   // successful. Crucially we don't have to worry about the case where
1129   // the swap is not successful since no valid program should be
1130   // relying on visibility of prior changes by the attempting thread
1131   // in the case where the CAS fails.
1132   //
1133   // Similarly, we don't need the trailing dmb ishld if we substitute
1134   // an ldaxr instruction since that will provide all the guarantees we
1135   // require regarding observation of changes made by other threads
1136   // before any change to the CAS address observed by the load.
1137   //
1138   // In order to generate the desired instruction sequence we need to
1139   // be able to identify specific 'signature' ideal graph node
1140   // sequences which i) occur as a translation of a volatile reads or
1141   // writes or CAS operations and ii) do not occur through any other
1142   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1144   // sequences to the desired machine code sequences. Selection of the
1145   // alternative rules can be implemented by predicates which identify
1146   // the relevant node sequences.
1147   //
1148   // The ideal graph generator translates a volatile read to the node
1149   // sequence
1150   //
1151   //   LoadX[mo_acquire]
1152   //   MemBarAcquire
1153   //
1154   // As a special case when using the compressed oops optimization we
1155   // may also see this variant
1156   //
1157   //   LoadN[mo_acquire]
1158   //   DecodeN
1159   //   MemBarAcquire
1160   //
1161   // A volatile write is translated to the node sequence
1162   //
1163   //   MemBarRelease
1164   //   StoreX[mo_release] {CardMark}-optional
1165   //   MemBarVolatile
1166   //
1167   // n.b. the above node patterns are generated with a strict
1168   // 'signature' configuration of input and output dependencies (see
1169   // the predicates below for exact details). The card mark may be as
1170   // simple as a few extra nodes or, in a few GC configurations, may
1171   // include more complex control flow between the leading and
1172   // trailing memory barriers. However, whatever the card mark
1173   // configuration these signatures are unique to translated volatile
1174   // reads/stores -- they will not appear as a result of any other
1175   // bytecode translation or inlining nor as a consequence of
1176   // optimizing transforms.
1177   //
1178   // We also want to catch inlined unsafe volatile gets and puts and
1179   // be able to implement them using either ldar<x>/stlr<x> or some
  // combination of ldr<x>/str<x> and dmb instructions.
1181   //
1182   // Inlined unsafe volatiles puts manifest as a minor variant of the
1183   // normal volatile put node sequence containing an extra cpuorder
1184   // membar
1185   //
1186   //   MemBarRelease
1187   //   MemBarCPUOrder
1188   //   StoreX[mo_release] {CardMark}-optional
1189   //   MemBarVolatile
1190   //
1191   // n.b. as an aside, the cpuorder membar is not itself subject to
1192   // matching and translation by adlc rules.  However, the rule
1193   // predicates need to detect its presence in order to correctly
1194   // select the desired adlc rules.
1195   //
1196   // Inlined unsafe volatile gets manifest as a somewhat different
1197   // node sequence to a normal volatile get
1198   //
1199   //   MemBarCPUOrder
1200   //        ||       \\
1201   //   MemBarAcquire LoadX[mo_acquire]
1202   //        ||
1203   //   MemBarCPUOrder
1204   //
1205   // In this case the acquire membar does not directly depend on the
1206   // load. However, we can be sure that the load is generated from an
1207   // inlined unsafe volatile get if we see it dependent on this unique
1208   // sequence of membar nodes. Similarly, given an acquire membar we
1209   // can know that it was added because of an inlined unsafe volatile
1210   // get if it is fed and feeds a cpuorder membar and if its feed
1211   // membar also feeds an acquiring load.
1212   //
1213   // Finally an inlined (Unsafe) CAS operation is translated to the
1214   // following ideal graph
1215   //
1216   //   MemBarRelease
1217   //   MemBarCPUOrder
1218   //   CompareAndSwapX {CardMark}-optional
1219   //   MemBarCPUOrder
1220   //   MemBarAcquire
1221   //
1222   // So, where we can identify these volatile read and write
1223   // signatures we can choose to plant either of the above two code
1224   // sequences. For a volatile read we can simply plant a normal
1225   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1226   // also choose to inhibit translation of the MemBarAcquire and
1227   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1228   //
1229   // When we recognise a volatile store signature we can choose to
1230   // plant at a dmb ish as a translation for the MemBarRelease, a
1231   // normal str<x> and then a dmb ish for the MemBarVolatile.
1232   // Alternatively, we can inhibit translation of the MemBarRelease
1233   // and MemBarVolatile and instead plant a simple stlr<x>
1234   // instruction.
1235   //
1236   // when we recognise a CAS signature we can choose to plant a dmb
1237   // ish as a translation for the MemBarRelease, the conventional
1238   // macro-instruction sequence for the CompareAndSwap node (which
1239   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1240   // Alternatively, we can elide generation of the dmb instructions
1241   // and plant the alternative CompareAndSwap macro-instruction
1242   // sequence (which uses ldaxr<x>).
1243   //
1244   // Of course, the above only applies when we see these signature
1245   // configurations. We still want to plant dmb instructions in any
1246   // other cases where we may see a MemBarAcquire, MemBarRelease or
1247   // MemBarVolatile. For example, at the end of a constructor which
1248   // writes final/volatile fields we will see a MemBarRelease
1249   // instruction and this needs a 'dmb ish' lest we risk the
1250   // constructed object being visible without making the
1251   // final/volatile field writes visible.
1252   //
1253   // n.b. the translation rules below which rely on detection of the
1254   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1255   // If we see anything other than the signature configurations we
1256   // always just translate the loads and stores to ldr<x> and str<x>
1257   // and translate acquire, release and volatile membars to the
1258   // relevant dmb instructions.
1259   //
1260 
1261   // graph traversal helpers used for volatile put/get and CAS
1262   // optimization
1263 
1264   // 1) general purpose helpers
1265 
1266   // if node n is linked to a parent MemBarNode by an intervening
1267   // Control and Memory ProjNode return the MemBarNode otherwise return
1268   // NULL.
1269   //
1270   // n may only be a Load or a MemBar.
1271 
1272   MemBarNode *parent_membar(const Node *n)
1273   {
1274     Node *ctl = NULL;
1275     Node *mem = NULL;
1276     Node *membar = NULL;
1277 
1278     if (n->is_Load()) {
1279       ctl = n->lookup(LoadNode::Control);
1280       mem = n->lookup(LoadNode::Memory);
1281     } else if (n->is_MemBar()) {
1282       ctl = n->lookup(TypeFunc::Control);
1283       mem = n->lookup(TypeFunc::Memory);
1284     } else {
1285         return NULL;
1286     }
1287 
1288     if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
1289       return NULL;
1290     }
1291 
1292     membar = ctl->lookup(0);
1293 
1294     if (!membar || !membar->is_MemBar()) {
1295       return NULL;
1296     }
1297 
1298     if (mem->lookup(0) != membar) {
1299       return NULL;
1300     }
1301 
1302     return membar->as_MemBar();
1303   }
1304 
1305   // if n is linked to a child MemBarNode by intervening Control and
1306   // Memory ProjNodes return the MemBarNode otherwise return NULL.
1307 
1308   MemBarNode *child_membar(const MemBarNode *n)
1309   {
1310     ProjNode *ctl = n->proj_out(TypeFunc::Control);
1311     ProjNode *mem = n->proj_out(TypeFunc::Memory);
1312 
1313     // MemBar needs to have both a Ctl and Mem projection
1314     if (! ctl || ! mem)
1315       return NULL;
1316 
1317     MemBarNode *child = NULL;
1318     Node *x;
1319 
1320     for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
1321       x = ctl->fast_out(i);
1322       // if we see a membar we keep hold of it. we may also see a new
1323       // arena copy of the original but it will appear later
1324       if (x->is_MemBar()) {
1325           child = x->as_MemBar();
1326           break;
1327       }
1328     }
1329 
1330     if (child == NULL) {
1331       return NULL;
1332     }
1333 
1334     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1335       x = mem->fast_out(i);
1336       // if we see a membar we keep hold of it. we may also see a new
1337       // arena copy of the original but it will appear later
1338       if (x == child) {
1339         return child;
1340       }
1341     }
1342     return NULL;
1343   }
1344 
1345   // helper predicate use to filter candidates for a leading memory
1346   // barrier
1347   //
1348   // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
1349   // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
1350 
1351   bool leading_membar(const MemBarNode *barrier)
1352   {
1353     int opcode = barrier->Opcode();
1354     // if this is a release membar we are ok
1355     if (opcode == Op_MemBarRelease) {
1356       return true;
1357     }
1358     // if its a cpuorder membar . . .
1359     if (opcode != Op_MemBarCPUOrder) {
1360       return false;
1361     }
1362     // then the parent has to be a release membar
1363     MemBarNode *parent = parent_membar(barrier);
1364     if (!parent) {
1365       return false;
1366     }
1367     opcode = parent->Opcode();
1368     return opcode == Op_MemBarRelease;
1369   }
1370 
1371   // 2) card mark detection helper
1372 
1373   // helper predicate which can be used to detect a volatile membar
1374   // introduced as part of a conditional card mark sequence either by
1375   // G1 or by CMS when UseCondCardMark is true.
1376   //
1377   // membar can be definitively determined to be part of a card mark
1378   // sequence if and only if all the following hold
1379   //
1380   // i) it is a MemBarVolatile
1381   //
1382   // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
1383   // true
1384   //
1385   // iii) the node's Mem projection feeds a StoreCM node.
1386 
1387   bool is_card_mark_membar(const MemBarNode *barrier)
1388   {
1389     if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
1390       return false;
1391     }
1392 
1393     if (barrier->Opcode() != Op_MemBarVolatile) {
1394       return false;
1395     }
1396 
1397     ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
1398 
1399     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
1400       Node *y = mem->fast_out(i);
1401       if (y->Opcode() == Op_StoreCM) {
1402         return true;
1403       }
1404     }
1405 
1406     return false;
1407   }
1408 
1409 
1410   // 3) helper predicates to traverse volatile put or CAS graphs which
1411   // may contain GC barrier subgraphs
1412 
1413   // Preamble
1414   // --------
1415   //
1416   // for volatile writes we can omit generating barriers and employ a
  // releasing store when we see a node sequence with a
1418   // leading MemBarRelease and a trailing MemBarVolatile as follows
1419   //
1420   //   MemBarRelease
1421   //  {      ||      } -- optional
1422   //  {MemBarCPUOrder}
1423   //         ||     \\
1424   //         ||     StoreX[mo_release]
1425   //         | \     /
1426   //         | MergeMem
1427   //         | /
1428   //   MemBarVolatile
1429   //
1430   // where
1431   //  || and \\ represent Ctl and Mem feeds via Proj nodes
1432   //  | \ and / indicate further routing of the Ctl and Mem feeds
1433   //
1434   // this is the graph we see for non-object stores. however, for a
1435   // volatile Object store (StoreN/P) we may see other nodes below the
1436   // leading membar because of the need for a GC pre- or post-write
1437   // barrier.
1438   //
  // with most GC configurations we will see this simple variant which
1440   // includes a post-write barrier card mark.
1441   //
1442   //   MemBarRelease______________________________
1443   //         ||    \\               Ctl \        \\
1444   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
1445   //         | \     /                       . . .  /
1446   //         | MergeMem
1447   //         | /
1448   //         ||      /
1449   //   MemBarVolatile
1450   //
1451   // i.e. the leading membar feeds Ctl to a CastP2X (which converts
1452   // the object address to an int used to compute the card offset) and
1453   // Ctl+Mem to a StoreB node (which does the actual card mark).
1454   //
1455   // n.b. a StoreCM node will only appear in this configuration when
1456   // using CMS. StoreCM differs from a normal card mark write (StoreB)
1457   // because it implies a requirement to order visibility of the card
1458   // mark (StoreCM) relative to the object put (StoreP/N) using a
1459   // StoreStore memory barrier (arguably this ought to be represented
1460   // explicitly in the ideal graph but that is not how it works). This
1461   // ordering is required for both non-volatile and volatile
1462   // puts. Normally that means we need to translate a StoreCM using
1463   // the sequence
1464   //
1465   //   dmb ishst
1466   //   stlrb
1467   //
1468   // However, in the case of a volatile put if we can recognise this
1469   // configuration and plant an stlr for the object write then we can
1470   // omit the dmb and just plant an strb since visibility of the stlr
1471   // is ordered before visibility of subsequent stores. StoreCM nodes
1472   // also arise when using G1 or using CMS with conditional card
1473   // marking. In these cases (as we shall see) we don't need to insert
1474   // the dmb when translating StoreCM because there is already an
1475   // intervening StoreLoad barrier between it and the StoreP/N.
1476   //
1477   // It is also possible to perform the card mark conditionally on it
1478   // currently being unmarked in which case the volatile put graph
1479   // will look slightly different
1480   //
1481   //   MemBarRelease____________________________________________
1482   //         ||    \\               Ctl \     Ctl \     \\  Mem \
1483   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
1484   //         | \     /                              \            |
1485   //         | MergeMem                            . . .      StoreB
1486   //         | /                                                /
1487   //         ||     /
1488   //   MemBarVolatile
1489   //
1490   // It is worth noting at this stage that both the above
1491   // configurations can be uniquely identified by checking that the
1492   // memory flow includes the following subgraph:
1493   //
1494   //   MemBarRelease
1495   //  {MemBarCPUOrder}
1496   //          |  \      . . .
1497   //          |  StoreX[mo_release]  . . .
1498   //          |   /
1499   //         MergeMem
1500   //          |
1501   //   MemBarVolatile
1502   //
1503   // This is referred to as a *normal* subgraph. It can easily be
1504   // detected starting from any candidate MemBarRelease,
1505   // StoreX[mo_release] or MemBarVolatile.
1506   //
1507   // A simple variation on this normal case occurs for an unsafe CAS
1508   // operation. The basic graph for a non-object CAS is
1509   //
1510   //   MemBarRelease
1511   //         ||
1512   //   MemBarCPUOrder
1513   //         ||     \\   . . .
1514   //         ||     CompareAndSwapX
1515   //         ||       |
1516   //         ||     SCMemProj
1517   //         | \     /
1518   //         | MergeMem
1519   //         | /
1520   //   MemBarCPUOrder
1521   //         ||
1522   //   MemBarAcquire
1523   //
1524   // The same basic variations on this arrangement (mutatis mutandis)
  // occur when a card mark is introduced. i.e. we see the same basic
  // shape but the StoreP/N is replaced with CompareAndSwapP/N and the
1527   // tail of the graph is a pair comprising a MemBarCPUOrder +
1528   // MemBarAcquire.
1529   //
1530   // So, in the case of a CAS the normal graph has the variant form
1531   //
1532   //   MemBarRelease
1533   //   MemBarCPUOrder
1534   //          |   \      . . .
1535   //          |  CompareAndSwapX  . . .
1536   //          |    |
1537   //          |   SCMemProj
1538   //          |   /  . . .
1539   //         MergeMem
1540   //          |
1541   //   MemBarCPUOrder
1542   //   MemBarAcquire
1543   //
1544   // This graph can also easily be detected starting from any
1545   // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
1546   //
1547   // the code below uses two helper predicates, leading_to_normal and
1548   // normal_to_leading to identify these normal graphs, one validating
1549   // the layout starting from the top membar and searching down and
1550   // the other validating the layout starting from the lower membar
1551   // and searching up.
1552   //
1553   // There are two special case GC configurations when a normal graph
1554   // may not be generated: when using G1 (which always employs a
1555   // conditional card mark); and when using CMS with conditional card
1556   // marking configured. These GCs are both concurrent rather than
1557   // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
1558   // graph between the leading and trailing membar nodes, in
  // particular enforcing stronger memory serialisation between the
1560   // object put and the corresponding conditional card mark. CMS
1561   // employs a post-write GC barrier while G1 employs both a pre- and
1562   // post-write GC barrier. Of course the extra nodes may be absent --
1563   // they are only inserted for object puts. This significantly
1564   // complicates the task of identifying whether a MemBarRelease,
1565   // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
1566   // when using these GC configurations (see below). It adds similar
1567   // complexity to the task of identifying whether a MemBarRelease,
1568   // CompareAndSwapX or MemBarAcquire forms part of a CAS.
1569   //
1570   // In both cases the post-write subtree includes an auxiliary
1571   // MemBarVolatile (StoreLoad barrier) separating the object put and
1572   // the read of the corresponding card. This poses two additional
1573   // problems.
1574   //
1575   // Firstly, a card mark MemBarVolatile needs to be distinguished
1576   // from a normal trailing MemBarVolatile. Resolving this first
1577   // problem is straightforward: a card mark MemBarVolatile always
1578   // projects a Mem feed to a StoreCM node and that is a unique marker
1579   //
1580   //      MemBarVolatile (card mark)
1581   //       C |    \     . . .
1582   //         |   StoreCM   . . .
1583   //       . . .
1584   //
1585   // The second problem is how the code generator is to translate the
1586   // card mark barrier? It always needs to be translated to a "dmb
1587   // ish" instruction whether or not it occurs as part of a volatile
1588   // put. A StoreLoad barrier is needed after the object put to ensure
1589   // i) visibility to GC threads of the object put and ii) visibility
1590   // to the mutator thread of any card clearing write by a GC
1591   // thread. Clearly a normal store (str) will not guarantee this
1592   // ordering but neither will a releasing store (stlr). The latter
1593   // guarantees that the object put is visible but does not guarantee
1594   // that writes by other threads have also been observed.
1595   //
1596   // So, returning to the task of translating the object put and the
1597   // leading/trailing membar nodes: what do the non-normal node graph
1598   // look like for these 2 special cases? and how can we determine the
1599   // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
1600   // in both normal and non-normal cases?
1601   //
1602   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
  // which selects conditional execution based on the value loaded
1604   // (LoadB) from the card. Ctl and Mem are fed to the If via an
1605   // intervening StoreLoad barrier (MemBarVolatile).
1606   //
1607   // So, with CMS we may see a node graph for a volatile object store
1608   // which looks like this
1609   //
1610   //   MemBarRelease
1611   //   MemBarCPUOrder_(leading)__________________
1612   //     C |    M \       \\                   C \
1613   //       |       \    StoreN/P[mo_release]  CastP2X
1614   //       |    Bot \    /
1615   //       |       MergeMem
1616   //       |         /
1617   //      MemBarVolatile (card mark)
1618   //     C |  ||    M |
1619   //       | LoadB    |
1620   //       |   |      |
1621   //       | Cmp      |\
1622   //       | /        | \
1623   //       If         |  \
1624   //       | \        |   \
1625   // IfFalse  IfTrue  |    \
1626   //       \     / \  |     \
1627   //        \   / StoreCM    |
1628   //         \ /      |      |
1629   //        Region   . . .   |
1630   //          | \           /
1631   //          |  . . .  \  / Bot
1632   //          |       MergeMem
1633   //          |          |
1634   //        MemBarVolatile (trailing)
1635   //
1636   // The first MergeMem merges the AliasIdxBot Mem slice from the
1637   // leading membar and the oopptr Mem slice from the Store into the
1638   // card mark membar. The trailing MergeMem merges the AliasIdxBot
1639   // Mem slice from the card mark membar and the AliasIdxRaw slice
1640   // from the StoreCM into the trailing membar (n.b. the latter
1641   // proceeds via a Phi associated with the If region).
1642   //
1643   // The graph for a CAS varies slightly, the obvious difference being
1644   // that the StoreN/P node is replaced by a CompareAndSwapP/N node
1645   // and the trailing MemBarVolatile by a MemBarCPUOrder +
1646   // MemBarAcquire pair. The other important difference is that the
1647   // CompareAndSwap node's SCMemProj is not merged into the card mark
1648   // membar - it still feeds the trailing MergeMem. This also means
1649   // that the card mark membar receives its Mem feed directly from the
1650   // leading membar rather than via a MergeMem.
1651   //
1652   //   MemBarRelease
1653   //   MemBarCPUOrder__(leading)_________________________
1654   //       ||                       \\                 C \
1655   //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
1656   //     C |  ||    M |              |
1657   //       | LoadB    |       ______/|
1658   //       |   |      |      /       |
1659   //       | Cmp      |     /      SCMemProj
1660   //       | /        |    /         |
1661   //       If         |   /         /
1662   //       | \        |  /         /
1663   // IfFalse  IfTrue  | /         /
1664   //       \     / \  |/ prec    /
1665   //        \   / StoreCM       /
1666   //         \ /      |        /
1667   //        Region   . . .    /
1668   //          | \            /
1669   //          |  . . .  \   / Bot
1670   //          |       MergeMem
1671   //          |          |
1672   //        MemBarCPUOrder
1673   //        MemBarAcquire (trailing)
1674   //
1675   // This has a slightly different memory subgraph to the one seen
1676   // previously but the core of it is the same as for the CAS normal
  // subgraph
1678   //
1679   //   MemBarRelease
1680   //   MemBarCPUOrder____
1681   //      ||             \      . . .
1682   //   MemBarVolatile  CompareAndSwapX  . . .
1683   //      |  \            |
1684   //        . . .   SCMemProj
1685   //          |     /  . . .
1686   //         MergeMem
1687   //          |
1688   //   MemBarCPUOrder
1689   //   MemBarAcquire
1690   //
1691   //
1692   // G1 is quite a lot more complicated. The nodes inserted on behalf
1693   // of G1 may comprise: a pre-write graph which adds the old value to
1694   // the SATB queue; the releasing store itself; and, finally, a
1695   // post-write graph which performs a card mark.
1696   //
1697   // The pre-write graph may be omitted, but only when the put is
1698   // writing to a newly allocated (young gen) object and then only if
1699   // there is a direct memory chain to the Initialize node for the
1700   // object allocation. This will not happen for a volatile put since
1701   // any memory chain passes through the leading membar.
1702   //
1703   // The pre-write graph includes a series of 3 If tests. The outermost
1704   // If tests whether SATB is enabled (no else case). The next If tests
1705   // whether the old value is non-NULL (no else case). The third tests
1706   // whether the SATB queue index is > 0, if so updating the queue. The
1707   // else case for this third If calls out to the runtime to allocate a
1708   // new queue buffer.
1709   //
1710   // So with G1 the pre-write and releasing store subgraph looks like
1711   // this (the nested Ifs are omitted).
1712   //
1713   //  MemBarRelease (leading)____________
1714   //     C |  ||  M \   M \    M \  M \ . . .
1715   //       | LoadB   \  LoadL  LoadN   \
1716   //       | /        \                 \
1717   //       If         |\                 \
1718   //       | \        | \                 \
1719   //  IfFalse  IfTrue |  \                 \
1720   //       |     |    |   \                 |
1721   //       |     If   |   /\                |
1722   //       |     |          \               |
1723   //       |                 \              |
1724   //       |    . . .         \             |
1725   //       | /       | /       |            |
1726   //      Region  Phi[M]       |            |
1727   //       | \       |         |            |
1728   //       |  \_____ | ___     |            |
1729   //     C | C \     |   C \ M |            |
1730   //       | CastP2X | StoreN/P[mo_release] |
1731   //       |         |         |            |
1732   //     C |       M |       M |          M |
1733   //        \        |         |           /
1734   //                  . . .
1735   //          (post write subtree elided)
1736   //                    . . .
1737   //             C \         M /
1738   //         MemBarVolatile (trailing)
1739   //
1740   // n.b. the LoadB in this subgraph is not the card read -- it's a
1741   // read of the SATB queue active flag.
1742   //
1743   // Once again the CAS graph is a minor variant on the above with the
1744   // expected substitutions of CompareAndSwapX for StoreN/P and
1745   // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
1746   //
1747   // The G1 post-write subtree is also optional, this time when the
1748   // new value being written is either null or can be identified as a
1749   // newly allocated (young gen) object with no intervening control
1750   // flow. The latter cannot happen but the former may, in which case
1751   // the card mark membar is omitted and the memory feeds from the
1752   // leading membar and the StoreN/P are merged direct into the
1753   // trailing membar as per the normal subgraph. So, the only special
1754   // case which arises is when the post-write subgraph is generated.
1755   //
1756   // The kernel of the post-write G1 subgraph is the card mark itself
1757   // which includes a card mark memory barrier (MemBarVolatile), a
1758   // card test (LoadB), and a conditional update (If feeding a
1759   // StoreCM). These nodes are surrounded by a series of nested Ifs
1760   // which try to avoid doing the card mark. The top level If skips if
1761   // the object reference does not cross regions (i.e. it tests if
1762   // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
1763   // need not be recorded. The next If, which skips on a NULL value,
1764   // may be absent (it is not generated if the type of value is >=
1765   // OopPtr::NotNull). The 3rd If skips writes to young regions (by
1766   // checking if card_val != young).  n.b. although this test requires
1767   // a pre-read of the card it can safely be done before the StoreLoad
1768   // barrier. However that does not bypass the need to reread the card
1769   // after the barrier.
1770   //
1771   //                (pre-write subtree elided)
1772   //        . . .                  . . .    . . .  . . .
1773   //        C |                    M |     M |    M |
1774   //       Region                  Phi[M] StoreN    |
1775   //          |                     / \      |      |
1776   //         / \_______            /   \     |      |
1777   //      C / C \      . . .            \    |      |
1778   //       If   CastP2X . . .            |   |      |
1779   //       / \                           |   |      |
1780   //      /   \                          |   |      |
1781   // IfFalse IfTrue                      |   |      |
1782   //   |       |                         |   |     /|
1783   //   |       If                        |   |    / |
1784   //   |      / \                        |   |   /  |
1785   //   |     /   \                        \  |  /   |
1786   //   | IfFalse IfTrue                   MergeMem  |
1787   //   |  . . .    / \                       /      |
1788   //   |          /   \                     /       |
1789   //   |     IfFalse IfTrue                /        |
1790   //   |      . . .    |                  /         |
1791   //   |               If                /          |
1792   //   |               / \              /           |
1793   //   |              /   \            /            |
1794   //   |         IfFalse IfTrue       /             |
1795   //   |           . . .   |         /              |
1796   //   |                    \       /               |
1797   //   |                     \     /                |
1798   //   |             MemBarVolatile__(card mark)    |
1799   //   |                ||   C |  M \  M \          |
1800   //   |               LoadB   If    |    |         |
1801   //   |                      / \    |    |         |
1802   //   |                     . . .   |    |         |
1803   //   |                          \  |    |        /
1804   //   |                        StoreCM   |       /
1805   //   |                          . . .   |      /
1806   //   |                        _________/      /
1807   //   |                       /  _____________/
1808   //   |   . . .       . . .  |  /            /
1809   //   |    |                 | /   _________/
1810   //   |    |               Phi[M] /        /
1811   //   |    |                 |   /        /
1812   //   |    |                 |  /        /
1813   //   |  Region  . . .     Phi[M]  _____/
1814   //   |    /                 |    /
1815   //   |                      |   /
1816   //   | . . .   . . .        |  /
1817   //   | /                    | /
1818   // Region           |  |  Phi[M]
1819   //   |              |  |  / Bot
1820   //    \            MergeMem
1821   //     \            /
1822   //     MemBarVolatile
1823   //
1824   // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
1825   // from the leading membar and the oopptr Mem slice from the Store
1826   // into the card mark membar i.e. the memory flow to the card mark
1827   // membar still looks like a normal graph.
1828   //
1829   // The trailing MergeMem merges an AliasIdxBot Mem slice with other
1830   // Mem slices (from the StoreCM and other card mark queue stores).
1831   // However in this case the AliasIdxBot Mem slice does not come
1832   // direct from the card mark membar. It is merged through a series
1833   // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
1834   // from the leading membar with the Mem feed from the card mark
1835   // membar. Each Phi corresponds to one of the Ifs which may skip
1836   // around the card mark membar. So when the If implementing the NULL
1837   // value check has been elided the total number of Phis is 2
1838   // otherwise it is 3.
1839   //
1840   // The CAS graph when using G1GC also includes a pre-write subgraph
1841   // and an optional post-write subgraph. The same variations are
1842   // introduced as for CMS with conditional card marking i.e. the
1843   // StoreP/N is swapped for a CompareAndSwapP/N, the trailing
1844   // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
1845   // Mem feed from the CompareAndSwapP/N includes a precedence
1846   // dependency feed to the StoreCM and a feed via an SCMemProj to the
1847   // trailing membar. So, as before the configuration includes the
1848   // normal CAS graph as a subgraph of the memory flow.
1849   //
1850   // So, the upshot is that in all cases the volatile put graph will
1851   // include a *normal* memory subgraph between the leading membar and
1852   // its child membar, either a volatile put graph (including a
1853   // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
1854   // When that child is not a card mark membar then it marks the end
1855   // of the volatile put or CAS subgraph. If the child is a card mark
1856   // membar then the normal subgraph will form part of a volatile put
1857   // subgraph if and only if the child feeds an AliasIdxBot Mem feed
1858   // to a trailing barrier via a MergeMem. That feed is either direct
1859   // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
1860   // memory flow (for G1).
1861   //
1862   // The predicates controlling generation of instructions for store
1863   // and barrier nodes employ a few simple helper functions (described
1864   // below) which identify the presence or absence of all these
1865   // subgraph configurations and provide a means of traversing from
1866   // one node in the subgraph to another.
1867 
1868   // is_CAS(int opcode)
1869   //
1870   // return true if opcode is one of the possible CompareAndSwapX
1871   // values otherwise false.
1872 
1873   bool is_CAS(int opcode)
1874   {
1875     return (opcode == Op_CompareAndSwapI ||
1876             opcode == Op_CompareAndSwapL ||
1877             opcode == Op_CompareAndSwapN ||
1878             opcode == Op_CompareAndSwapP);
1879   }
1880 
1881   // leading_to_normal
1882   //
1883   // graph traversal helper which detects the normal case Mem feed from
1884   // a release membar (or, optionally, its cpuorder child) to a
1885   // dependent volatile membar i.e. it ensures that one or other of
1886   // the following Mem flow subgraph is present.
1887   //
1888   //   MemBarRelease
1889   //   MemBarCPUOrder {leading}
1890   //          |  \      . . .
1891   //          |  StoreN/P[mo_release]  . . .
1892   //          |   /
1893   //         MergeMem
1894   //          |
1895   //   MemBarVolatile {trailing or card mark}
1896   //
1897   //   MemBarRelease
1898   //   MemBarCPUOrder {leading}
1899   //      |       \      . . .
1900   //      |     CompareAndSwapX  . . .
1901   //               |
1902   //     . . .    SCMemProj
1903   //           \   |
1904   //      |    MergeMem
1905   //      |       /
1906   //    MemBarCPUOrder
1907   //    MemBarAcquire {trailing}
1908   //
1909   // if the correct configuration is present returns the trailing
1910   // membar otherwise NULL.
1911   //
1912   // the input membar is expected to be either a cpuorder membar or a
1913   // release membar. in the latter case it should not have a cpu membar
1914   // child.
1915   //
1916   // the returned value may be a card mark or trailing membar
1917   //
1918 
1919   MemBarNode *leading_to_normal(MemBarNode *leading)
1920   {
1921     assert((leading->Opcode() == Op_MemBarRelease ||
1922             leading->Opcode() == Op_MemBarCPUOrder),
1923            "expecting a volatile or cpuroder membar!");
1924 
1925     // check the mem flow
1926     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
1927 
1928     if (!mem) {
1929       return NULL;
1930     }
1931 
1932     Node *x = NULL;
1933     StoreNode * st = NULL;
1934     LoadStoreNode *cas = NULL;
1935     MergeMemNode *mm = NULL;
1936 
1937     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1938       x = mem->fast_out(i);
1939       if (x->is_MergeMem()) {
1940         if (mm != NULL) {
1941           return NULL;
1942         }
1943         // two merge mems is one too many
1944         mm = x->as_MergeMem();
1945       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
1946         // two releasing stores/CAS nodes is one too many
1947         if (st != NULL || cas != NULL) {
1948           return NULL;
1949         }
1950         st = x->as_Store();
1951       } else if (is_CAS(x->Opcode())) {
1952         if (st != NULL || cas != NULL) {
1953           return NULL;
1954         }
1955         cas = x->as_LoadStore();
1956       }
1957     }
1958 
1959     // must have a store or a cas
1960     if (!st && !cas) {
1961       return NULL;
1962     }
1963 
1964     // must have a merge if we also have st
1965     if (st && !mm) {
1966       return NULL;
1967     }
1968 
1969     Node *y = NULL;
1970     if (cas) {
1971       // look for an SCMemProj
1972       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
1973         x = cas->fast_out(i);
1974         if (x->is_Proj()) {
1975           y = x;
1976           break;
1977         }
1978       }
1979       if (y == NULL) {
1980         return NULL;
1981       }
1982       // the proj must feed a MergeMem
1983       for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) {
1984         x = y->fast_out(i);
1985         if (x->is_MergeMem()) {
1986           mm = x->as_MergeMem();
1987           break;
1988         }
1989       }
1990       if (mm == NULL)
1991         return NULL;
1992     } else {
1993       // ensure the store feeds the existing mergemem;
1994       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
1995         if (st->fast_out(i) == mm) {
1996           y = st;
1997           break;
1998         }
1999       }
2000       if (y == NULL) {
2001         return NULL;
2002       }
2003     }
2004 
2005     MemBarNode *mbar = NULL;
2006     // ensure the merge feeds to the expected type of membar
2007     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2008       x = mm->fast_out(i);
2009       if (x->is_MemBar()) {
2010         int opcode = x->Opcode();
2011         if (opcode == Op_MemBarVolatile && st) {
2012           mbar = x->as_MemBar();
2013         } else if (cas && opcode == Op_MemBarCPUOrder) {
2014           MemBarNode *y =  x->as_MemBar();
2015           y = child_membar(y);
2016           if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
2017             mbar = y;
2018           }
2019         }
2020         break;
2021       }
2022     }
2023 
2024     return mbar;
2025   }
2026 
2027   // normal_to_leading
2028   //
2029   // graph traversal helper which detects the normal case Mem feed
2030   // from either a card mark or a trailing membar to a preceding
2031   // release membar (optionally its cpuorder child) i.e. it ensures
2032   // that one or other of the following Mem flow subgraphs is present.
2033   //
2034   //   MemBarRelease
2035   //   MemBarCPUOrder {leading}
2036   //          |  \      . . .
2037   //          |  StoreN/P[mo_release]  . . .
2038   //          |   /
2039   //         MergeMem
2040   //          |
2041   //   MemBarVolatile {card mark or trailing}
2042   //
2043   //   MemBarRelease
2044   //   MemBarCPUOrder {leading}
2045   //      |       \      . . .
2046   //      |     CompareAndSwapX  . . .
2047   //               |
2048   //     . . .    SCMemProj
2049   //           \   |
2050   //      |    MergeMem
2051   //      |        /
2052   //    MemBarCPUOrder
2053   //    MemBarAcquire {trailing}
2054   //
2055   // this predicate checks for the same flow as the previous predicate
2056   // but starting from the bottom rather than the top.
2057   //
2058   // if the configuration is present returns the cpuorder membar for
2059   // preference or when absent the release membar otherwise NULL.
2060   //
2061   // n.b. the input membar is expected to be a MemBarVolatile but
2062   // need not be a card mark membar.
2063 
2064   MemBarNode *normal_to_leading(const MemBarNode *barrier)
2065   {
2066     // input must be a volatile membar
2067     assert((barrier->Opcode() == Op_MemBarVolatile ||
2068             barrier->Opcode() == Op_MemBarAcquire),
2069            "expecting a volatile or an acquire membar");
2070     Node *x;
2071     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
2072 
2073     // if we have an acquire membar then it must be fed via a CPUOrder
2074     // membar
2075 
2076     if (is_cas) {
2077       // skip to parent barrier which must be a cpuorder
2078       x = parent_membar(barrier);
2079       if (x->Opcode() != Op_MemBarCPUOrder)
2080         return NULL;
2081     } else {
2082       // start from the supplied barrier
2083       x = (Node *)barrier;
2084     }
2085 
2086     // the Mem feed to the membar should be a merge
2087     x = x ->in(TypeFunc::Memory);
2088     if (!x->is_MergeMem())
2089       return NULL;
2090 
2091     MergeMemNode *mm = x->as_MergeMem();
2092 
2093     if (is_cas) {
2094       // the merge should be fed from the CAS via an SCMemProj node
2095       x = NULL;
2096       for (uint idx = 1; idx < mm->req(); idx++) {
2097         if (mm->in(idx)->Opcode() == Op_SCMemProj) {
2098           x = mm->in(idx);
2099           break;
2100         }
2101       }
2102       if (x == NULL) {
2103         return NULL;
2104       }
2105       // check for a CAS feeding this proj
2106       x = x->in(0);
2107       int opcode = x->Opcode();
2108       if (!is_CAS(opcode)) {
2109         return NULL;
2110       }
2111       // the CAS should get its mem feed from the leading membar
2112       x = x->in(MemNode::Memory);
2113     } else {
2114       // the merge should get its Bottom mem feed from the leading membar
2115       x = mm->in(Compile::AliasIdxBot);
2116     }
2117 
2118     // ensure this is a non control projection
2119     if (!x->is_Proj() || x->is_CFG()) {
2120       return NULL;
2121     }
2122     // if it is fed by a membar that's the one we want
2123     x = x->in(0);
2124 
2125     if (!x->is_MemBar()) {
2126       return NULL;
2127     }
2128 
2129     MemBarNode *leading = x->as_MemBar();
2130     // reject invalid candidates
2131     if (!leading_membar(leading)) {
2132       return NULL;
2133     }
2134 
2135     // ok, we have a leading membar, now for the sanity clauses
2136 
2137     // the leading membar must feed Mem to a releasing store or CAS
2138     ProjNode *mem = leading->proj_out(TypeFunc::Memory);
2139     StoreNode *st = NULL;
2140     LoadStoreNode *cas = NULL;
2141     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
2142       x = mem->fast_out(i);
2143       if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
2144         // two stores or CASes is one too many
2145         if (st != NULL || cas != NULL) {
2146           return NULL;
2147         }
2148         st = x->as_Store();
2149       } else if (is_CAS(x->Opcode())) {
2150         if (st != NULL || cas != NULL) {
2151           return NULL;
2152         }
2153         cas = x->as_LoadStore();
2154       }
2155     }
2156 
2157     // we should not have both a store and a cas
2158     if (st == NULL & cas == NULL) {
2159       return NULL;
2160     }
2161 
2162     if (st == NULL) {
2163       // nothing more to check
2164       return leading;
2165     } else {
2166       // we should not have a store if we started from an acquire
2167       if (is_cas) {
2168         return NULL;
2169       }
2170 
2171       // the store should feed the merge we used to get here
2172       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
2173         if (st->fast_out(i) == mm) {
2174           return leading;
2175         }
2176       }
2177     }
2178 
2179     return NULL;
2180   }
2181 
2182   // card_mark_to_trailing
2183   //
2184   // graph traversal helper which detects extra, non-normal Mem feed
2185   // from a card mark volatile membar to a trailing membar i.e. it
2186   // ensures that one of the following three GC post-write Mem flow
2187   // subgraphs is present.
2188   //
2189   // 1)
2190   //     . . .
2191   //       |
2192   //   MemBarVolatile (card mark)
2193   //      |          |
2194   //      |        StoreCM
2195   //      |          |
2196   //      |        . . .
2197   //  Bot |  /
2198   //   MergeMem
2199   //      |
2200   //      |
2201   //    MemBarVolatile {trailing}
2202   //
2203   // 2)
2204   //   MemBarRelease/CPUOrder (leading)
2205   //    |
2206   //    |
2207   //    |\       . . .
2208   //    | \        |
2209   //    |  \  MemBarVolatile (card mark)
2210   //    |   \   |     |
2211   //     \   \  |   StoreCM    . . .
2212   //      \   \ |
2213   //       \  Phi
2214   //        \ /
2215   //        Phi  . . .
2216   //     Bot |   /
2217   //       MergeMem
2218   //         |
2219   //    MemBarVolatile {trailing}
2220   //
2221   //
2222   // 3)
2223   //   MemBarRelease/CPUOrder (leading)
2224   //    |
2225   //    |\
2226   //    | \
2227   //    |  \      . . .
2228   //    |   \       |
2229   //    |\   \  MemBarVolatile (card mark)
2230   //    | \   \   |     |
2231   //    |  \   \  |   StoreCM    . . .
2232   //    |   \   \ |
2233   //     \   \  Phi
2234   //      \   \ /
2235   //       \  Phi
2236   //        \ /
2237   //        Phi  . . .
2238   //     Bot |   /
2239   //       MergeMem
2240   //         |
2241   //         |
2242   //    MemBarVolatile {trailing}
2243   //
2244   // configuration 1 is only valid if UseConcMarkSweepGC &&
2245   // UseCondCardMark
2246   //
2247   // configurations 2 and 3 are only valid if UseG1GC.
2248   //
2249   // if a valid configuration is present returns the trailing membar
2250   // otherwise NULL.
2251   //
2252   // n.b. the supplied membar is expected to be a card mark
2253   // MemBarVolatile i.e. the caller must ensure the input node has the
2254   // correct operand and feeds Mem to a StoreCM node
2255 
2256   MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
2257   {
2258     // input must be a card mark volatile membar
2259     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
2260 
2261     Node *feed = barrier->proj_out(TypeFunc::Memory);
2262     Node *x;
2263     MergeMemNode *mm = NULL;
2264 
2265     const int MAX_PHIS = 3;     // max phis we will search through
2266     int phicount = 0;           // current search count
2267 
2268     bool retry_feed = true;
2269     while (retry_feed) {
2270       // see if we have a direct MergeMem feed
2271       for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2272         x = feed->fast_out(i);
2273         // the correct Phi will be merging a Bot memory slice
2274         if (x->is_MergeMem()) {
2275           mm = x->as_MergeMem();
2276           break;
2277         }
2278       }
2279       if (mm) {
2280         retry_feed = false;
2281       } else if (UseG1GC & phicount++ < MAX_PHIS) {
2282         // the barrier may feed indirectly via one or two Phi nodes
2283         PhiNode *phi = NULL;
2284         for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
2285           x = feed->fast_out(i);
2286           // the correct Phi will be merging a Bot memory slice
2287           if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
2288             phi = x->as_Phi();
2289             break;
2290           }
2291         }
2292         if (!phi) {
2293           return NULL;
2294         }
2295         // look for another merge below this phi
2296         feed = phi;
2297       } else {
2298         // couldn't find a merge
2299         return NULL;
2300       }
2301     }
2302 
2303     // sanity check this feed turns up as the expected slice
2304     assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
2305 
2306     MemBarNode *trailing = NULL;
2307     // be sure we have a trailing membar the merge
2308     for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
2309       x = mm->fast_out(i);
2310       if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
2311         trailing = x->as_MemBar();
2312         break;
2313       }
2314     }
2315 
2316     return trailing;
2317   }
2318 
2319   // trailing_to_card_mark
2320   //
2321   // graph traversal helper which detects extra, non-normal Mem feed
2322   // from a trailing volatile membar to a preceding card mark volatile
2323   // membar i.e. it identifies whether one of the three possible extra
2324   // GC post-write Mem flow subgraphs is present
2325   //
2326   // this predicate checks for the same flow as the previous predicate
2327   // but starting from the bottom rather than the top.
2328   //
2329   // if the configuration is present returns the card mark membar
2330   // otherwise NULL
2331   //
2332   // n.b. the supplied membar is expected to be a trailing
2333   // MemBarVolatile i.e. the caller must ensure the input node has the
2334   // correct opcode
2335 
  // Walk backwards from a trailing (non-card-mark) MemBarVolatile to
  // the card mark membar feeding it, unwinding (for G1) up to
  // MAX_PHIS Phi nodes on the Bot memory slice. Returns the card mark
  // membar or NULL when no valid configuration is present.
  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
  {
    assert(trailing->Opcode() == Op_MemBarVolatile,
           "expecting a volatile membar");
    assert(!is_card_mark_membar(trailing),
           "not expecting a card mark membar");

    // the Mem feed to the membar should be a merge
    Node *x = trailing->in(TypeFunc::Memory);
    if (!x->is_MergeMem()) {
      return NULL;
    }

    MergeMemNode *mm = x->as_MergeMem();

    x = mm->in(Compile::AliasIdxBot);
    // with G1 we may possibly see a Phi or two before we see a Memory
    // Proj from the card mark membar

    const int MAX_PHIS = 3;     // max phis we will search through
    int phicount = 0;           // current search count

    // if the Bot slice is already a Proj we can go straight to its
    // defining membar; otherwise we must unwind the Phi chain first
    bool retry_feed = !x->is_Proj();

    while (retry_feed) {
      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
        PhiNode *phi = x->as_Phi();
        ProjNode *proj = NULL;
        PhiNode *nextphi = NULL;
        bool found_leading = false;
        // classify this Phi's inputs: a Proj from a MemBarVolatile, a
        // Proj from a leading (release/cpuorder) membar, or a further
        // Phi to recurse into
        for (uint i = 1; i < phi->req(); i++) {
          x = phi->in(i);
          if (x->is_Phi()) {
            nextphi = x->as_Phi();
          } else if (x->is_Proj()) {
            int opcode = x->in(0)->Opcode();
            if (opcode == Op_MemBarVolatile) {
              proj = x->as_Proj();
            } else if (opcode == Op_MemBarRelease ||
                       opcode == Op_MemBarCPUOrder) {
              // probably a leading membar
              found_leading = true;
            }
          }
        }
        // if we found a correct looking proj then retry from there
        // otherwise we must see a leading and a phi or this is the
        // wrong config
        if (proj != NULL) {
          x = proj;
          retry_feed = false;
        } else if (found_leading && nextphi != NULL) {
          // retry from this phi to check phi2
          x = nextphi;
        } else {
          // not what we were looking for
          return NULL;
        }
      } else {
        // not a G1 Phi chain (or chain too deep) -- give up
        return NULL;
      }
    }
    // the proj has to come from the card mark membar
    x = x->in(0);
    if (!x->is_MemBar()) {
      return NULL;
    }

    MemBarNode *card_mark_membar = x->as_MemBar();

    // the volatile membar we reached must itself be a card mark
    if (!is_card_mark_membar(card_mark_membar)) {
      return NULL;
    }

    return card_mark_membar;
  }
2412 
2413   // trailing_to_leading
2414   //
2415   // graph traversal helper which checks the Mem flow up the graph
2416   // from a (non-card mark) trailing membar attempting to locate and
2417   // return an associated leading membar. it first looks for a
2418   // subgraph in the normal configuration (relying on helper
2419   // normal_to_leading). failing that it then looks for one of the
2420   // possible post-write card mark subgraphs linking the trailing node
2421   // to the card mark membar (relying on helper
2422   // trailing_to_card_mark), and then checks that the card mark membar
2423   // is fed by a leading membar (once again relying on auxiliary
2424   // predicate normal_to_leading).
2425   //
2426   // if the configuration is valid returns the cpuorder membar for
2427   // preference or when absent the release membar otherwise NULL.
2428   //
2429   // n.b. the input membar is expected to be either a volatile or
2430   // acquire membar but in the former case must *not* be a card mark
2431   // membar.
2432 
2433   MemBarNode *trailing_to_leading(const MemBarNode *trailing)
2434   {
2435     assert((trailing->Opcode() == Op_MemBarAcquire ||
2436             trailing->Opcode() == Op_MemBarVolatile),
2437            "expecting an acquire or volatile membar");
2438     assert((trailing->Opcode() != Op_MemBarVolatile ||
2439             !is_card_mark_membar(trailing)),
2440            "not expecting a card mark membar");
2441 
2442     MemBarNode *leading = normal_to_leading(trailing);
2443 
2444     if (leading) {
2445       return leading;
2446     }
2447 
2448     // nothing more to do if this is an acquire
2449     if (trailing->Opcode() == Op_MemBarAcquire) {
2450       return NULL;
2451     }
2452 
2453     MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
2454 
2455     if (!card_mark_membar) {
2456       return NULL;
2457     }
2458 
2459     return normal_to_leading(card_mark_membar);
2460   }
2461 
2462   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
2463 
// predicate controlling whether the MemBarAcquire passed in can be
// elided i.e. whether the required acquire ordering is already
// provided by the ldar<x> emitted for the associated volatile or
// unsafe load
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // a volatile read derived from bytecode (or also from an inlined
  // SHA field read via LibraryCallKit::load_field_from_object)
  // manifests as a LoadX[mo_acquire] followed by an acquire membar
  // with a bogus read dependency on its preceding load. so in those
  // cases we will find the load node at the PARMS offset of the
  // acquire membar.  n.b. there may be an intervening DecodeN node.
  //
  // a volatile load derived from an inlined unsafe field access
  // manifests as a cpuorder membar with Ctl and Mem projections
  // feeding both an acquire membar and a LoadX[mo_acquire]. The
  // acquire then feeds another cpuorder membar via Ctl and Mem
  // projections. The load has no output dependency on these trailing
  // membars because subsequent nodes inserted into the graph take
  // their control feed from the final membar cpuorder meaning they
  // are all ordered after the load.

  Node *x = barrier->lookup(TypeFunc::Parms);
  if (x) {
    // we are starting from an acquire and it has a fake dependency
    //
    // need to check for
    //
    //   LoadX[mo_acquire]
    //   {  |1   }
    //   {DecodeN}
    //      |Parms
    //   MemBarAcquire*
    //
    // where * tags node we were passed
    // and |k means input k
    if (x->is_DecodeNarrowPtr()) {
      x = x->in(1);
    }

    return (x->is_Load() && x->as_Load()->is_acquire());
  }

  // now check for an unsafe volatile get

  // need to check for
  //
  //   MemBarCPUOrder
  //        ||       \\
  //   MemBarAcquire* LoadX[mo_acquire]
  //        ||
  //   MemBarCPUOrder
  //
  // where * tags node we were passed
  // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes

  // check for a parent MemBarCPUOrder
  ProjNode *ctl;
  ProjNode *mem;
  MemBarNode *parent = parent_membar(barrier);
  if (!parent || parent->Opcode() != Op_MemBarCPUOrder)
    return false;
  ctl = parent->proj_out(TypeFunc::Control);
  mem = parent->proj_out(TypeFunc::Memory);
  if (!ctl || !mem) {
    return false;
  }
  // ensure the proj nodes both feed a LoadX[mo_acquire]
  LoadNode *ld = NULL;
  for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
    x = ctl->fast_out(i);
    // if we see a load we keep hold of it and stop searching
    if (x->is_Load()) {
      ld = x->as_Load();
      break;
    }
  }
  // it must be an acquiring load
  if (ld && ld->is_acquire()) {

    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      x = mem->fast_out(i);
      // if we see the same load we drop it and stop searching
      if (x == ld) {
        ld = NULL;
        break;
      }
    }
    // we must have dropped the load
    if (ld == NULL) {
      // check for a child cpuorder membar
      MemBarNode *child  = child_membar(barrier->as_MemBar());
      if (child && child->Opcode() == Op_MemBarCPUOrder)
        return true;
    }
  }

  // final option for unnecessary membar is that it is a trailing node
  // belonging to a CAS

  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());

  return leading != NULL;
}
2571 
2572 bool needs_acquiring_load(const Node *n)
2573 {
2574   assert(n->is_Load(), "expecting a load");
2575   if (UseBarriersForVolatile) {
2576     // we use a normal load and a dmb
2577     return false;
2578   }
2579 
2580   LoadNode *ld = n->as_Load();
2581 
2582   if (!ld->is_acquire()) {
2583     return false;
2584   }
2585 
2586   // check if this load is feeding an acquire membar
2587   //
2588   //   LoadX[mo_acquire]
2589   //   {  |1   }
2590   //   {DecodeN}
2591   //      |Parms
2592   //   MemBarAcquire*
2593   //
2594   // where * tags node we were passed
2595   // and |k means input k
2596 
2597   Node *start = ld;
2598   Node *mbacq = NULL;
2599 
2600   // if we hit a DecodeNarrowPtr we reset the start node and restart
2601   // the search through the outputs
2602  restart:
2603 
2604   for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
2605     Node *x = start->fast_out(i);
2606     if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
2607       mbacq = x;
2608     } else if (!mbacq &&
2609                (x->is_DecodeNarrowPtr() ||
2610                 (x->is_Mach() && x->Opcode() == Op_DecodeN))) {
2611       start = x;
2612       goto restart;
2613     }
2614   }
2615 
2616   if (mbacq) {
2617     return true;
2618   }
2619 
2620   // now check for an unsafe volatile get
2621 
2622   // check if Ctl and Proj feed comes from a MemBarCPUOrder
2623   //
2624   //     MemBarCPUOrder
2625   //        ||       \\
2626   //   MemBarAcquire* LoadX[mo_acquire]
2627   //        ||
2628   //   MemBarCPUOrder
2629 
2630   MemBarNode *membar;
2631 
2632   membar = parent_membar(ld);
2633 
2634   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2635     return false;
2636   }
2637 
2638   // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain
2639 
2640   membar = child_membar(membar);
2641 
2642   if (!membar || !membar->Opcode() == Op_MemBarAcquire) {
2643     return false;
2644   }
2645 
2646   membar = child_membar(membar);
2647 
2648   if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
2649     return false;
2650   }
2651 
2652   return true;
2653 }
2654 
// predicate controlling whether the MemBarRelease passed in can be
// elided i.e. whether the required release ordering will be provided
// by the stlr<x> emitted for the associated volatile store
bool unnecessary_release(const Node *n)
{
  assert((n->is_MemBar() &&
          n->Opcode() == Op_MemBarRelease),
         "expecting a release membar");

  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  // if there is a dependent CPUOrder barrier then use that as the
  // leading

  MemBarNode *barrier = n->as_MemBar();
  // check for an intervening cpuorder membar
  MemBarNode *b = child_membar(barrier);
  if (b && b->Opcode() == Op_MemBarCPUOrder) {
    // ok, so start the check from the dependent cpuorder barrier
    barrier = b;
  }

  // must start with a normal feed
  MemBarNode *child_barrier = leading_to_normal(barrier);

  if (!child_barrier) {
    return false;
  }

  if (!is_card_mark_membar(child_barrier)) {
    // this is the trailing membar and we are done
    return true;
  }

  // must be sure this card mark feeds a trailing membar
  MemBarNode *trailing = card_mark_to_trailing(child_barrier);
  return (trailing != NULL);
}
2693 
// predicate controlling whether a trailing MemBarVolatile can be
// elided because the stlr<x> emitted for the associated volatile put
// already provides the required ordering
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // first we check if this is part of a card mark. if so then we have
  // to generate a StoreLoad barrier

  if (is_card_mark_membar(mbvol)) {
      return false;
  }

  // ok, if it's not a card mark then we still need to check if it is
  // a trailing membar of a volatile put graph.

  return (trailing_to_leading(mbvol) != NULL);
}
2716 
2717 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
2718 
// predicate controlling whether a volatile store is emitted as a
// releasing stlr<x>. returns true iff the store is marked releasing
// and is fed, via its memory input, by a leading membar that heads a
// recognised volatile put subgraph
bool needs_releasing_store(const Node *n)
{
  // assert n->is_Store();
  if (UseBarriersForVolatile) {
    // we use a normal store and dmb combination
    return false;
  }

  StoreNode *st = n->as_Store();

  // the store must be marked as releasing
  if (!st->is_release()) {
    return false;
  }

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  if (! x || !x->is_Proj()) {
    return false;
  }

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  if (!x || !x->is_MemBar()) {
    return false;
  }

  MemBarNode *barrier = x->as_MemBar();

  // if the barrier is a release membar or a cpuorder membar fed by a
  // release membar then we need to check whether that forms part of a
  // volatile put graph.

  // reject invalid candidates
  if (!leading_membar(barrier)) {
    return false;
  }

  // does this lead a normal subgraph?
  MemBarNode *mbvol = leading_to_normal(barrier);

  if (!mbvol) {
    return false;
  }

  // all done unless this is a card mark
  if (!is_card_mark_membar(mbvol)) {
    return true;
  }

  // we found a card mark -- just make sure we have a trailing barrier

  return (card_mark_to_trailing(mbvol) != NULL);
}
2777 
2778 // predicate controlling translation of CAS
2779 //
2780 // returns true if CAS needs to use an acquiring load otherwise false
2781 
// predicate controlling whether a CAS is emitted with an acquiring
// load exclusive (ldaxr). always true when not using explicit
// barriers; the debug build additionally verifies that the CAS sits
// in the expected release/cpuorder ... acquire membar sandwich
bool needs_acquiring_load_exclusive(const Node *n)
{
  assert(is_CAS(n->Opcode()), "expecting a compare and swap");
  if (UseBarriersForVolatile) {
    return false;
  }

  // CAS nodes only ought to turn up in inlined unsafe CAS operations
#ifdef ASSERT
  LoadStoreNode *st = n->as_LoadStore();

  // the store must be fed by a membar

  Node *x = st->lookup(StoreNode::Memory);

  assert (x && x->is_Proj(), "CAS not fed by memory proj!");

  ProjNode *proj = x->as_Proj();

  x = proj->lookup(0);

  assert (x && x->is_MemBar(), "CAS not fed by membar!");

  MemBarNode *barrier = x->as_MemBar();

  // the barrier must be a cpuorder membar fed by a release membar

  assert(barrier->Opcode() == Op_MemBarCPUOrder,
         "CAS not fed by cpuorder membar!");

  MemBarNode *b = parent_membar(barrier);
  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
          "CAS not fed by cpuorder+release membar pair!");

  // does this lead a normal subgraph?
  MemBarNode *mbar = leading_to_normal(barrier);

  assert(mbar != NULL, "CAS not embedded in normal graph!");

  assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
#endif // ASSERT
  // so we can just return true here
  return true;
}
2826 
2827 // predicate controlling translation of StoreCM
2828 //
2829 // returns true if a StoreStore must precede the card write otherwise
2830 // false
2831 
// predicate controlling whether the dmb ishst (StoreStore) normally
// emitted before a StoreCM card write can be omitted
bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then the
  // object put is implemented as an str so we must insert the dmb
  // ishst

  if (UseBarriersForVolatile) {
    return false;
  }

  // we can omit the dmb ishst if this StoreCM is part of a volatile
  // put because in that case the put will be implemented by stlr
  //
  // we need to check for a normal subgraph feeding this StoreCM.
  // that means the StoreCM must be fed Memory from a leading membar,
  // either a MemBarRelease or its dependent MemBarCPUOrder, and the
  // leading membar must be part of a normal subgraph

  Node *x = storecm->in(StoreNode::Memory);

  if (!x->is_Proj()) {
    return false;
  }

  x = x->in(0);

  if (!x->is_MemBar()) {
    return false;
  }

  MemBarNode *leading = x->as_MemBar();

  // reject invalid candidates
  if (!leading_membar(leading)) {
    return false;
  }

  // we can omit the StoreStore if it is the head of a normal subgraph
  return (leading_to_normal(leading) != NULL);
}
2881 
2882 
2883 #define __ _masm.
2884 
2885 // advance declarations for helper functions to convert register
2886 // indices to register objects
2887 
2888 // the ad file has to provide implementations of certain methods
2889 // expected by the generic code
2890 //
2891 // REQUIRED FUNCTIONALITY
2892 
2893 //=============================================================================
2894 
2895 // !!!!! Special hack to get all types of calls to specify the byte offset
2896 //       from the start of the call to the point where the return address
2897 //       will point.
2898 
2899 int MachCallStaticJavaNode::ret_addr_offset()
2900 {
2901   // call should be a simple bl
2902   int off = 4;
2903   return off;
2904 }
2905 
2906 int MachCallDynamicJavaNode::ret_addr_offset()
2907 {
2908   return 16; // movz, movk, movk, bl
2909 }
2910 
2911 int MachCallRuntimeNode::ret_addr_offset() {
2912   // for generated stubs the call will be
2913   //   far_call(addr)
2914   // for real runtime callouts it will be six instructions
2915   // see aarch64_enc_java_to_runtime
2916   //   adr(rscratch2, retaddr)
2917   //   lea(rscratch1, RuntimeAddress(addr)
2918   //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
2919   //   blrt rscratch1
2920   CodeBlob *cb = CodeCache::find_blob(_entry_point);
2921   if (cb) {
2922     return MacroAssembler::far_branch_size();
2923   } else {
2924     return 6 * NativeInstruction::instruction_size;
2925   }
2926 }
2927 
2928 // Indicate if the safepoint node needs the polling page as an input
2929 
2930 // the shared code plants the oop data at the start of the generated
2931 // code for the safepoint node and that needs ot be at the load
2932 // instruction itself. so we cannot plant a mov of the safepoint poll
2933 // address followed by a load. setting this to true means the mov is
2934 // scheduled as a prior instruction. that's better for scheduling
2935 // anyway.
2936 
bool SafePointNode::needs_polling_address_input()
{
  // true so the poll page address is materialized by a separate,
  // schedulable mov rather than folded into the poll load (see the
  // explanation above)
  return true;
}
2941 
2942 //=============================================================================
2943 
#ifndef PRODUCT
// textual form of the breakpoint for -XX:+PrintOptoAssembly style output
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

// emit a brk instruction to trap into the debugger
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // let the generic code derive the size from the emitted length
  return MachNode::size(ra_);
}
2958 
2959 //=============================================================================
2960 
#ifndef PRODUCT
  // textual form showing how many padding bytes this nop covers
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions as alignment padding
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // each nop occupies one fixed-width instruction slot
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
2977 
2978 //=============================================================================
// the constant base node defines no output register
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // never reached because requires_postalloc_expand() returns false
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // empty encoding, so zero bytes
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
3003 
#ifndef PRODUCT
// print the expected prolog instruction sequence for the small and
// large frame-size encodings
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // small frames fit the whole adjustment in a sub immediate;
  // large frames need the size materialized in rscratch1
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
3025 
// lay down the method prolog: patchable nop, optional stack bang,
// frame build, simulator notification and constant table setup
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
3061 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  // no relocatable values counted for the prolog
  return 0;
}
3072 
3073 //=============================================================================
3074 
#ifndef PRODUCT
// print the expected epilog sequence: frame pop (three size-dependent
// encodings) followed by an optional poll page touch
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
3100 
// lay down the method epilog: remove the frame, notify the simulator
// and, for method compilations, read the safepoint polling page
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
3116 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  // use the generic pipeline class for the epilog
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
3138 
3139 //=============================================================================
3140 
3141 // Figure out which register class each belongs in: rc_int, rc_float or
3142 // rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// map an OptoReg name to its register class; register numbers count
// register halves, so the int class covers the first 60 slots and
// the float class the following 128
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float register * 2 halves
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
3168 
3169 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
3170   Compile* C = ra_->C;
3171 
3172   // Get registers to move.
3173   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
3174   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
3175   OptoReg::Name dst_hi = ra_->get_reg_second(this);
3176   OptoReg::Name dst_lo = ra_->get_reg_first(this);
3177 
3178   enum RC src_hi_rc = rc_class(src_hi);
3179   enum RC src_lo_rc = rc_class(src_lo);
3180   enum RC dst_hi_rc = rc_class(dst_hi);
3181   enum RC dst_lo_rc = rc_class(dst_lo);
3182 
3183   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
3184 
3185   if (src_hi != OptoReg::Bad) {
3186     assert((src_lo&1)==0 && src_lo+1==src_hi &&
3187            (dst_lo&1)==0 && dst_lo+1==dst_hi,
3188            "expected aligned-adjacent pairs");
3189   }
3190 
3191   if (src_lo == dst_lo && src_hi == dst_hi) {
3192     return 0;            // Self copy, no move.
3193   }
3194 
3195   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3196               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3197   int src_offset = ra_->reg2offset(src_lo);
3198   int dst_offset = ra_->reg2offset(dst_lo);
3199 
3200   if (bottom_type()->isa_vect() != NULL) {
3201     uint ireg = ideal_reg();
3202     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3203     if (cbuf) {
3204       MacroAssembler _masm(cbuf);
3205       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3206       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3207         // stack->stack
3208         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3209         if (ireg == Op_VecD) {
3210           __ unspill(rscratch1, true, src_offset);
3211           __ spill(rscratch1, true, dst_offset);
3212         } else {
3213           __ spill_copy128(src_offset, dst_offset);
3214         }
3215       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3216         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3217                ireg == Op_VecD ? __ T8B : __ T16B,
3218                as_FloatRegister(Matcher::_regEncode[src_lo]));
3219       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3220         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3221                        ireg == Op_VecD ? __ D : __ Q,
3222                        ra_->reg2offset(dst_lo));
3223       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3224         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3225                        ireg == Op_VecD ? __ D : __ Q,
3226                        ra_->reg2offset(src_lo));
3227       } else {
3228         ShouldNotReachHere();
3229       }
3230     }
3231   } else if (cbuf) {
3232     MacroAssembler _masm(cbuf);
3233     switch (src_lo_rc) {
3234     case rc_int:
3235       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
3236         if (is64) {
3237             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
3238                    as_Register(Matcher::_regEncode[src_lo]));
3239         } else {
3240             MacroAssembler _masm(cbuf);
3241             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
3242                     as_Register(Matcher::_regEncode[src_lo]));
3243         }
3244       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
3245         if (is64) {
3246             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3247                      as_Register(Matcher::_regEncode[src_lo]));
3248         } else {
3249             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3250                      as_Register(Matcher::_regEncode[src_lo]));
3251         }
3252       } else {                    // gpr --> stack spill
3253         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3254         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
3255       }
3256       break;
3257     case rc_float:
3258       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
3259         if (is64) {
3260             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
3261                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3262         } else {
3263             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
3264                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3265         }
3266       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
3267           if (cbuf) {
3268             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3269                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3270         } else {
3271             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3272                      as_FloatRegister(Matcher::_regEncode[src_lo]));
3273         }
3274       } else {                    // fpr --> stack spill
3275         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3276         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3277                  is64 ? __ D : __ S, dst_offset);
3278       }
3279       break;
3280     case rc_stack:
3281       if (dst_lo_rc == rc_int) {  // stack --> gpr load
3282         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
3283       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
3284         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3285                    is64 ? __ D : __ S, src_offset);
3286       } else {                    // stack --> stack copy
3287         assert(dst_lo_rc == rc_stack, "spill to bad register class");
3288         __ unspill(rscratch1, is64, src_offset);
3289         __ spill(rscratch1, is64, dst_offset);
3290       }
3291       break;
3292     default:
3293       assert(false, "bad rc_class for spill");
3294       ShouldNotReachHere();
3295     }
3296   }
3297 
3298   if (st) {
3299     st->print("spill ");
3300     if (src_lo_rc == rc_stack) {
3301       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
3302     } else {
3303       st->print("%s -> ", Matcher::regName[src_lo]);
3304     }
3305     if (dst_lo_rc == rc_stack) {
3306       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
3307     } else {
3308       st->print("%s", Matcher::regName[dst_lo]);
3309     }
3310     if (bottom_type()->isa_vect() != NULL) {
3311       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
3312     } else {
3313       st->print("\t# spill size = %d", is64 ? 64:32);
3314     }
3315   }
3316 
3317   return 0;
3318 
3319 }
3320 
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // before register allocation we can only name the nodes involved
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // variable length; derive from the emitted instructions
  return MachNode::size(ra_);
}
3337 
3338 //=============================================================================
3339 
3340 #ifndef PRODUCT
3341 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
3342   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
3343   int reg = ra_->get_reg_first(this);
3344   st->print("add %s, rsp, #%d]\t# box lock",
3345             Matcher::regName[reg], offset);
3346 }
3347 #endif
3348 
// materialize the address of the box lock stack slot into the
// allocated register
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // only offsets encodable as an add immediate are supported
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit lays down exactly one 4 byte add instruction
  return 4;
}
3366 
3367 //=============================================================================
3368 
3369 #ifndef PRODUCT
3370 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
3371 {
3372   st->print_cr("# MachUEPNode");
3373   if (UseCompressedClassPointers) {
3374     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3375     if (Universe::narrow_klass_shift() != 0) {
3376       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
3377     }
3378   } else {
3379    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
3380   }
3381   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
3382   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
3383 }
3384 #endif
3385 
// emit the inline cache check: compare the receiver's klass against
// the expected klass and jump to the ic miss stub on mismatch
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
3399 
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // variable length; derive from the emitted instructions
  return MachNode::size(ra_);
}
3404 
3405 // REQUIRED EMIT CODE
3406 
3407 //=============================================================================
3408 
3409 // Emit exception handler code.
// Emit exception handler code.
// returns the offset of the handler within the stub section, or 0 if
// the code cache is full
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
3428 
3429 // Emit deopt handler code.
3430 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
3431 {
3432   // Note that the code buffer's insts_mark is always relative to insts.
3433   // That's why we must use the macroassembler to generate a handler.
3434   MacroAssembler _masm(&cbuf);
3435   address base = __ start_a_stub(size_deopt_handler());
3436   if (base == NULL) {
3437     ciEnv::current()->record_failure("CodeCache is full");
3438     return 0;  // CodeBuffer::expand failed
3439   }
3440   int offset = __ offset();
3441 
3442   __ adr(lr, __ pc());
3443   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
3444 
3445   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
3446   __ end_a_stub();
3447   return offset;
3448 }
3449 
3450 // REQUIRED MATCHER CODE
3451 
3452 //=============================================================================
3453 
3454 const bool Matcher::match_rule_supported(int opcode) {
3455 
3456   // TODO
3457   // identify extra cases that we might want to provide match rules for
3458   // e.g. Op_StrEquals and other intrinsics
3459   if (!has_match_rule(opcode)) {
3460     return false;
3461   }
3462 
3463   return true;  // Per default match rules are supported.
3464 }
3465 
3466 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
3467 
3468   // TODO
3469   // identify extra cases that we might want to provide match rules for
3470   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
3471   bool ret_value = match_rule_supported(opcode);
3472   // Add rules here.
3473 
3474   return ret_value;  // Per default match rules are supported.
3475 }
3476 
3477 const int Matcher::float_pressure(int default_pressure_threshold) {
3478   return default_pressure_threshold;
3479 }
3480 
// Map a register number to an x87-style FPU stack offset.  AArch64 has
// no FPU register stack, so this should never be called.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;  // not reached
}
3486 
3487 // Is this branch offset short enough that a short branch can be used?
3488 //
3489 // NOTE: If the platform does not provide any short branch variants, then
3490 //       this method should return false for offset 0.
3491 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
3492   // The passed offset is relative to address of the branch.
3493 
3494   return (-32768 <= offset && offset < 32768);
3495 }
3496 
3497 const bool Matcher::isSimpleConstant64(jlong value) {
3498   // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
3499   // Probably always true, even if a temp register is required.
3500   return true;
3501 }
3502 
3503 // true just means we have fast l2f conversion
3504 const bool Matcher::convL2FSupported(void) {
3505   return true;
3506 }
3507 
3508 // Vector width in bytes.
3509 const int Matcher::vector_width_in_bytes(BasicType bt) {
3510   int size = MIN2(16,(int)MaxVectorSize);
3511   // Minimum 2 values in vector
3512   if (size < 2*type2aelembytes(bt)) size = 0;
3513   // But never < 4
3514   if (size < 4) size = 0;
3515   return size;
3516 }
3517 
3518 // Limits on vector size (number of elements) loaded into vector.
3519 const int Matcher::max_vector_size(const BasicType bt) {
3520   return vector_width_in_bytes(bt)/type2aelembytes(bt);
3521 }
3522 const int Matcher::min_vector_size(const BasicType bt) {
3523 //  For the moment limit the vector size to 8 bytes
3524     int size = 8 / type2aelembytes(bt);
3525     if (size < 2) size = 2;
3526     return size;
3527 }
3528 
3529 // Vector ideal reg.
3530 const int Matcher::vector_ideal_reg(int len) {
3531   switch(len) {
3532     case  8: return Op_VecD;
3533     case 16: return Op_VecX;
3534   }
3535   ShouldNotReachHere();
3536   return 0;
3537 }
3538 
3539 const int Matcher::vector_shift_count_ideal_reg(int size) {
3540   return Op_VecX;
3541 }
3542 
3543 // AES support not yet implemented
3544 const bool Matcher::pass_original_key_for_aes() {
3545   return false;
3546 }
3547 
// This port supports misaligned vector store/load unless the user
// forces alignment with -XX:+AlignVector.
// NOTE(review): this comment previously said "x86" -- stale copy from
// the x86 AD file.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
3552 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray: arrays at or below this size are
// presumably cleared inline rather than via a runtime call -- confirm
// against the ClearArray match rules.
const int Matcher::init_array_short_size = 18 * BytesPerLong;
3558 
3559 // Use conditional move (CMOVL)
3560 const int Matcher::long_cmove_cost() {
3561   // long cmoves are no more expensive than int cmoves
3562   return 0;
3563 }
3564 
3565 const int Matcher::float_cmove_cost() {
3566   // float cmoves are no more expensive than int cmoves
3567   return 0;
3568 }
3569 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// (AArch64 shift instructions use only the low bits of the count.)
const bool Matcher::need_masked_shift_count = false;
3581 
// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Folding the decode into the address is only free when compressed
  // oops are unscaled (shift of zero).
  return Universe::narrow_oop_shift() == 0;
}
3595 
3596 bool Matcher::narrow_klass_use_complex_address() {
3597 // TODO
3598 // decide whether we need to set this to true
3599   return false;
3600 }
3601 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
// (AArch64 handles misaligned doubles, so no split is needed.)
const bool Matcher::misaligned_doubles_ok = true;
3614 
// Platform fixup hook for implicit null checks; never expected to be
// called on AArch64, hence the Unimplemented guard.
// NOTE(review): header comment previously said "No-op on amd64" -- a
// stale copy; the body is a hard Unimplemented(), not a no-op.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}
3619 
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.  AArch64 FP arithmetic is already
// IEEE-strict, so none are needed.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
3633 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments travel in r0-r7 and v0-v7 (the AAPCS64 argument
  // registers); both halves of each allocator register pair count.
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}
3659 
3660 bool Matcher::is_spillable_arg(int reg)
3661 {
3662   return can_be_java_arg(reg);
3663 }
3664 
3665 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
3666   return false;
3667 }
3668 
// Register for DIVI projection of divmodI.
// None of these four projection masks should ever be requested:
// presumably C2 never forms fused DivMod nodes on AArch64 (which has
// separate divide and multiply-subtract instructions) -- confirm
// against has_match_rule for Op_DivModI/Op_DivModL.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
3691 
// Mask of the register used to preserve SP across a method-handle
// invoke: the frame pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
3695 
// helper for encoding java_to_runtime calls on sim
//
// this is needed to compute the extra arguments required when
// planting a call to the simulator blrt instruction. the TypeFunc
// can be queried to identify the counts for integral, and floating
// arguments and the return type

static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
{
  int gps = 0;
  int fps = 0;
  const TypeTuple *domain = tf->domain();
  int max = domain->cnt();
  // Count the argument slots by basic type.
  for (int i = TypeFunc::Parms; i < max; i++) {
    const Type *t = domain->field_at(i);
    switch(t->basic_type()) {
    case T_FLOAT:
    case T_DOUBLE:
      fps++;
      // NOTE(review): there is no break here, so float/double args fall
      // through and ALSO increment gps, making gps the total argument
      // count rather than the integral count.  Confirm the simulator's
      // blrt expects that; otherwise this is a missing-break bug.
    default:
      gps++;
    }
  }
  gpcnt = gps;
  fpcnt = fps;
  // Classify the return type for the simulator.  Note the default
  // label sits deliberately between the cases; any type other than
  // void/float/double is reported as integral.
  BasicType rt = tf->return_type();
  switch (rt) {
  case T_VOID:
    rtype = MacroAssembler::ret_type_void;
    break;
  default:
    rtype = MacroAssembler::ret_type_integral;
    break;
  case T_FLOAT:
    rtype = MacroAssembler::ret_type_float;
    break;
  case T_DOUBLE:
    rtype = MacroAssembler::ret_type_double;
    break;
  }
}
3737 
// Emit a volatile (acquire/release) access via INSN, which only takes
// a plain base register: volatile accesses permit no index, offset or
// scale, and the guarantees enforce that.  The macro also declares a
// local MacroAssembler named _masm over cbuf, so enc_classes using it
// can follow up with `__` instructions.  SCRATCH is currently unused.
// (Comments kept outside the macro: a // inside a continuation line
// would swallow the trailing backslash.)
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer types used by the loadStore helpers below to
// select the concrete MacroAssembler emit routine.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
3751 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      // The index was an int converted to long: sign-extend it.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Base + displacement only.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        // Base + scaled/extended index.
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Both index and displacement: fold the displacement into
        // rscratch1 first, then index off that.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
3788 
3789   static void loadStore(MacroAssembler masm, mem_float_insn insn,
3790                          FloatRegister reg, int opcode,
3791                          Register base, int index, int size, int disp)
3792   {
3793     Address::extend scale;
3794 
3795     switch (opcode) {
3796     case INDINDEXSCALEDOFFSETI2L:
3797     case INDINDEXSCALEDI2L:
3798     case INDINDEXSCALEDOFFSETI2LN:
3799     case INDINDEXSCALEDI2LN:
3800       scale = Address::sxtw(size);
3801       break;
3802     default:
3803       scale = Address::lsl(size);
3804     }
3805 
3806      if (index == -1) {
3807       (masm.*insn)(reg, Address(base, disp));
3808     } else {
3809       if (disp == 0) {
3810         (masm.*insn)(reg, Address(base, as_Register(index), scale));
3811       } else {
3812         masm.lea(rscratch1, Address(base, disp));
3813         (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
3814       }
3815     }
3816   }
3817 
3818   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
3819                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
3820                          int opcode, Register base, int index, int size, int disp)
3821   {
3822     if (index == -1) {
3823       (masm.*insn)(reg, T, Address(base, disp));
3824     } else {
3825       assert(disp == 0, "unsupported address mode");
3826       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
3827     }
3828   }
3829 
3830 %}
3831 
3832 
3833 
3834 //----------ENCODING BLOCK-----------------------------------------------------
3835 // This block specifies the encoding classes used by the compiler to
3836 // output byte streams.  Encoding classes are parameterized macros
3837 // used by Machine Instruction Nodes in order to generate the bit
3838 // encoding of the instruction.  Operands specify their base encoding
3839 // interface with the interface keyword.  There are currently
3840 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
3841 // COND_INTER.  REG_INTER causes an operand to generate a function
3842 // which returns its register number when queried.  CONST_INTER causes
3843 // an operand to generate a function which returns the value of the
3844 // constant when queried.  MEMORY_INTER causes an operand to generate
3845 // four functions which return the Base Register, the Index Register,
3846 // the Scale Value, and the Offset Value of the operand when queried.
3847 // COND_INTER causes an operand to generate six functions which return
3848 // the encoding code (ie - encoding bits for the instruction)
3849 // associated with each basic boolean condition for a conditional
3850 // instruction.
3851 //
3852 // Instructions specify two basic values for encoding.  Again, a
3853 // function is available to check if the constant displacement is an
3854 // oop. They use the ins_encode keyword to specify their encoding
3855 // classes (which must be a sequence of enc_class names, and their
3856 // parameters, specified in the encoding block), and they use the
3857 // opcode keyword to specify, in order, their primary, secondary, and
3858 // tertiary opcode.  Only the opcode sections which a particular
3859 // instruction needs for encoding need to be specified.
encode %{
  // Build emit functions for each basic byte or larger field in the
  // intel encoding scheme (opcode, rm, sib, immediate), and call them
  // from C++ code in the enc_class source block.  Emit functions will
  // live in the main source block for now.  In future, we can
  // generalize this by adding a syntax that specifies the sizes of
  // fields in an order, so that the adlc can build the emit functions
  // automagically

  // catch all for unimplemented encodings: stops with a report rather
  // than silently emitting bad code
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");
  %}
3874 
  // BEGIN Non-volatile memory access

  // Each load enc_class below forwards to a loadStore helper; the
  // MacroAssembler routine name encodes width and signedness
  // (e.g. ldrsbw = load, signed byte, into a 32-bit w register).
  // Several names appear twice with iRegI/iRegL operands -- ADL
  // overloads them by signature.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP loads (32- and 64-bit).
  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads; the SIMD_RegVariant selects the access width
  // (S/D/Q presumably 32/64/128 bits -- see MacroAssembler).
  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
3978 
  // Non-volatile stores.  The *0 variants store the zero register zr,
  // i.e. they write a constant zero without needing a source operand.
  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero byte store preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (encoding 31 in a str source means zr, not sp), so copy sp
    // through rscratch2 first.
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Scalar FP stores (32- and 64-bit).
  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores, by SIMD_RegVariant width (matches the loads above).
  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // END Non-volatile memory access
4073 
  // volatile loads and stores
  //
  // All of these go through the MOV_VOLATILE macro, which emits an
  // acquire-load (ldar*) or release-store (stlr*) and guarantees the
  // address is a bare base register (no index/offset/scale).

  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}


  // There is no sign-extending acquire load, so the signed variants
  // do a zero-extending ldarb/ldarh and then sign-extend in register.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // FP volatile loads: acquire-load into rscratch1, then move the
  // bits into the FP register with fmov.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}

  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    // (same sp-vs-zr encoding issue as aarch64_enc_str above)
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // FP volatile stores: move the bits into rscratch2 with fmov
  // (in a nested scope so the macro can declare its own _masm),
  // then release-store from rscratch2.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
4200 
4201   // synchronized read/update encodings
4202 
  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
    // Load-acquire exclusive of a 64-bit value.  ldaxr only accepts a
    // plain base register, so any index/displacement is folded into
    // rscratch1 with lea first.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp + (index << scale) needs two lea steps
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
4231 
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
    // Store-release exclusive of a 64-bit value.  stlxr only accepts a
    // plain base register, so any index/displacement is folded into
    // rscratch2 with lea first; rscratch1 receives the status word.
    MacroAssembler _masm(&cbuf);
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // base + disp + (index << scale) needs two lea steps
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // stlxr writes 0 to rscratch1 on success; set flags so EQ == success
    __ cmpw(rscratch1, zr);
  %}
4261 
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    // 64-bit compare-and-swap (plain, no acquire on the load).  The
    // matcher must have reduced the address to a bare base register.
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}
4268 
  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    // 32-bit compare-and-swap (plain, no acquire on the load).  The
    // matcher must have reduced the address to a bare base register.
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}
4275 
4276 
  // The only difference between aarch64_enc_cmpxchg and
  // aarch64_enc_cmpxchg_acq is that we use load-acquire (ldaxr rather
  // than ldxr) in the CompareAndSwap sequence to serve as a barrier on
  // acquiring a lock.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr);
  %}
4287 
  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    // 32-bit variant of aarch64_enc_cmpxchg_acq: ldaxrw gives the
    // acquire barrier needed when taking a lock.
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               &Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw);
  %}
4294 
4295 
4296   // auxiliary used for CompareAndSwapX to set result register
4297   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4298     MacroAssembler _masm(&cbuf);
4299     Register res_reg = as_Register($res$$reg);
4300     __ cset(res_reg, Assembler::EQ);
4301   %}
4302 
4303   // prefetch encodings
4304 
  enc_class aarch64_enc_prefetchw(memory mem) %{
    // Prefetch-for-store (PSTL1KEEP) of the memory operand.  When both
    // an index and a displacement are present, the displacement is
    // folded into rscratch1 first since prfm cannot encode both.
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }
  %}
4323 
  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
    // Zero cnt words starting at base using an 8-way unrolled store loop
    // entered Duff's-device style via a computed branch.
    MacroAssembler _masm(&cbuf);
    Register cnt_reg = as_Register($cnt$$reg);
    Register base_reg = as_Register($base$$reg);
    // base is word aligned
    // cnt is count of words

    Label loop;
    Label entry;

//  Algorithm:
//
//    scratch1 = cnt & 7;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1) {
//      do {
//        cnt -= 8;
//          p[-8] = 0;
//        case 7:
//          p[-7] = 0;
//        case 6:
//          p[-6] = 0;
//          // ...
//        case 1:
//          p[-1] = 0;
//        case 0:
//          p += 8;
//      } while (cnt);
//    }

    const int unroll = 8; // Number of str(zr) instructions we'll unroll

    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= (cnt % unroll)
    // base_reg always points to the end of the region we're about to zero
    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
    // computed branch: jump (cnt % unroll) str instructions (4 bytes
    // each, hence LSL 2) before 'entry' so exactly that many stores run
    __ adr(rscratch2, entry);
    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
    __ br(rscratch2);
    __ bind(loop);
    __ sub(cnt_reg, cnt_reg, unroll);
    for (int i = -unroll; i < 0; i++)
      __ str(zr, Address(base_reg, i * wordSize));
    __ bind(entry);
    __ add(base_reg, base_reg, unroll * wordSize);
    __ cbnz(cnt_reg, loop);
  %}
4372 
  // mov encodings
4374 
4375   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
4376     MacroAssembler _masm(&cbuf);
4377     u_int32_t con = (u_int32_t)$src$$constant;
4378     Register dst_reg = as_Register($dst$$reg);
4379     if (con == 0) {
4380       __ movw(dst_reg, zr);
4381     } else {
4382       __ movw(dst_reg, con);
4383     }
4384   %}
4385 
4386   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
4387     MacroAssembler _masm(&cbuf);
4388     Register dst_reg = as_Register($dst$$reg);
4389     u_int64_t con = (u_int64_t)$src$$constant;
4390     if (con == 0) {
4391       __ mov(dst_reg, zr);
4392     } else {
4393       __ mov(dst_reg, con);
4394     }
4395   %}
4396 
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    // Materialize a pointer constant, choosing the emission form from
    // the constant's relocation type.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // null and the 1-sentinel have dedicated encodings (mov_p0/mov_p1)
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          // small absolute addresses fit a plain immediate move
          __ mov(dst_reg, con);
        } else {
          // otherwise form the address page-relative via adrp + add
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
4421 
4422   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
4423     MacroAssembler _masm(&cbuf);
4424     Register dst_reg = as_Register($dst$$reg);
4425     __ mov(dst_reg, zr);
4426   %}
4427 
4428   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
4429     MacroAssembler _masm(&cbuf);
4430     Register dst_reg = as_Register($dst$$reg);
4431     __ mov(dst_reg, (u_int64_t)1);
4432   %}
4433 
  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
    // Load the address of the safepoint polling page with a single
    // adrp; the page is page-aligned, so the low-bits offset returned
    // by adrp must be zero (asserted below).
    MacroAssembler _masm(&cbuf);
    address page = (address)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    unsigned long off;
    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
    assert(off == 0, "assumed offset == 0");
  %}
4442 
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    // Load the card-table byte map base via the dedicated helper.
    MacroAssembler _masm(&cbuf);
    __ load_byte_map_base($dst$$Register);
  %}
4447 
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    // Materialize a non-null narrow (compressed) oop constant.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // the null case has its own encoding (mov_n0)
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
4460 
4461   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
4462     MacroAssembler _masm(&cbuf);
4463     Register dst_reg = as_Register($dst$$reg);
4464     __ mov(dst_reg, zr);
4465   %}
4466 
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    // Materialize a narrow (compressed) klass pointer constant.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
4479 
4480   // arithmetic encodings
4481 
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    // 32-bit add/subtract of an immediate.  The instruct's primary
    // opcode selects the operation; a subtract is folded into an add of
    // the negated constant and vice versa so the emitted immediate is
    // always non-negative.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}
4495 
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    // 64-bit add/subtract of an immediate; same primary-opcode
    // negation trick as aarch64_enc_addsubw_imm.
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
4509 
4510   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
4511     MacroAssembler _masm(&cbuf);
4512    Register dst_reg = as_Register($dst$$reg);
4513    Register src1_reg = as_Register($src1$$reg);
4514    Register src2_reg = as_Register($src2$$reg);
4515     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
4516   %}
4517 
4518   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
4519     MacroAssembler _masm(&cbuf);
4520    Register dst_reg = as_Register($dst$$reg);
4521    Register src1_reg = as_Register($src1$$reg);
4522    Register src2_reg = as_Register($src2$$reg);
4523     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
4524   %}
4525 
4526   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
4527     MacroAssembler _masm(&cbuf);
4528    Register dst_reg = as_Register($dst$$reg);
4529    Register src1_reg = as_Register($src1$$reg);
4530    Register src2_reg = as_Register($src2$$reg);
4531     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
4532   %}
4533 
4534   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
4535     MacroAssembler _masm(&cbuf);
4536    Register dst_reg = as_Register($dst$$reg);
4537    Register src1_reg = as_Register($src1$$reg);
4538    Register src2_reg = as_Register($src2$$reg);
4539     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
4540   %}
4541 
4542   // compare instruction encodings
4543 
4544   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
4545     MacroAssembler _masm(&cbuf);
4546     Register reg1 = as_Register($src1$$reg);
4547     Register reg2 = as_Register($src2$$reg);
4548     __ cmpw(reg1, reg2);
4549   %}
4550 
4551   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
4552     MacroAssembler _masm(&cbuf);
4553     Register reg = as_Register($src1$$reg);
4554     int32_t val = $src2$$constant;
4555     if (val >= 0) {
4556       __ subsw(zr, reg, val);
4557     } else {
4558       __ addsw(zr, reg, -val);
4559     }
4560   %}
4561 
4562   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
4563     MacroAssembler _masm(&cbuf);
4564     Register reg1 = as_Register($src1$$reg);
4565     u_int32_t val = (u_int32_t)$src2$$constant;
4566     __ movw(rscratch1, val);
4567     __ cmpw(reg1, rscratch1);
4568   %}
4569 
4570   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
4571     MacroAssembler _masm(&cbuf);
4572     Register reg1 = as_Register($src1$$reg);
4573     Register reg2 = as_Register($src2$$reg);
4574     __ cmp(reg1, reg2);
4575   %}
4576 
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    // Compare a long register against a 12-bit add/sub immediate: a
    // flag-setting subtract for val >= 0, a flag-setting add of the
    // negation otherwise.
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
    // aargh, Long.MIN_VALUE is a special case: it equals its own
    // negation, so materialize it in rscratch1 and compare directly
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
4591 
4592   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
4593     MacroAssembler _masm(&cbuf);
4594     Register reg1 = as_Register($src1$$reg);
4595     u_int64_t val = (u_int64_t)$src2$$constant;
4596     __ mov(rscratch1, val);
4597     __ cmp(reg1, rscratch1);
4598   %}
4599 
4600   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
4601     MacroAssembler _masm(&cbuf);
4602     Register reg1 = as_Register($src1$$reg);
4603     Register reg2 = as_Register($src2$$reg);
4604     __ cmp(reg1, reg2);
4605   %}
4606 
4607   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
4608     MacroAssembler _masm(&cbuf);
4609     Register reg1 = as_Register($src1$$reg);
4610     Register reg2 = as_Register($src2$$reg);
4611     __ cmpw(reg1, reg2);
4612   %}
4613 
4614   enc_class aarch64_enc_testp(iRegP src) %{
4615     MacroAssembler _masm(&cbuf);
4616     Register reg = as_Register($src$$reg);
4617     __ cmp(reg, zr);
4618   %}
4619 
4620   enc_class aarch64_enc_testn(iRegN src) %{
4621     MacroAssembler _masm(&cbuf);
4622     Register reg = as_Register($src$$reg);
4623     __ cmpw(reg, zr);
4624   %}
4625 
4626   enc_class aarch64_enc_b(label lbl) %{
4627     MacroAssembler _masm(&cbuf);
4628     Label *L = $lbl$$label;
4629     __ b(*L);
4630   %}
4631 
4632   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
4633     MacroAssembler _masm(&cbuf);
4634     Label *L = $lbl$$label;
4635     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4636   %}
4637 
4638   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
4639     MacroAssembler _masm(&cbuf);
4640     Label *L = $lbl$$label;
4641     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
4642   %}
4643 
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     // Slow-path subtype check: scan the secondary supers.  On failure
     // control transfers to 'miss'; on success it falls through, where
     // result is zeroed when $primary is set.
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
4661 
  enc_class aarch64_enc_java_static_call(method meth) %{
    // Static (or optimized-virtual) Java call via a trampoline; emits a
    // to-interpreter stub for resolved methods.  Bails out and records a
    // compile failure if the code cache cannot hold the call or stub.
    MacroAssembler _masm(&cbuf);

    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec), &cbuf);

      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4688 
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    // Virtual Java call through an inline cache; records a compile
    // failure if the code cache is full.
    MacroAssembler _masm(&cbuf);
    int method_index = resolved_method_index(cbuf);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
4698 
  enc_class aarch64_enc_call_epilog() %{
    // Post-call epilogue.  The VerifyStackAtCalls check is not
    // implemented on AArch64 yet; it traps if the flag is enabled.
    MacroAssembler _masm(&cbuf);
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
4706 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blrt
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // target is inside the code cache: reachable via a trampoline call
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      // target is outside the code cache: make a full native call
      int gpcnt;
      int fpcnt;
      int rtype;
      getCallInfo(tf(), gpcnt, fpcnt, rtype);
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaThread::pd_last_frame().
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
      __ bind(retaddr);
      // pop the breadcrumb frame
      __ add(sp, sp, 2 * wordSize);
    }
  %}
4737 
  enc_class aarch64_enc_rethrow() %{
    // Jump to the shared rethrow stub (may be out of branch range,
    // hence far_jump).
    MacroAssembler _masm(&cbuf);
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
4742 
  enc_class aarch64_enc_ret() %{
    // Return to the address in the link register.
    MacroAssembler _masm(&cbuf);
    __ ret(lr);
  %}
4747 
4748   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
4749     MacroAssembler _masm(&cbuf);
4750     Register target_reg = as_Register($jump_target$$reg);
4751     __ br(target_reg);
4752   %}
4753 
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    // Tail jump used for exception forwarding.
    MacroAssembler _masm(&cbuf);
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
4763 
  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor enter: biased locking (optional), thin-lock CAS,
    // recursive stack-lock test, then inflated-monitor owner CAS.
    // On exit: flag == EQ indicates success, flag == NE means the caller
    // must take the slow path into the runtime.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Load markOop from object into displaced_header.
    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr);
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
    }

    // Handle existing monitor
    if ((EmitSync & 0x02) == 0) {
      // we can use AArch64's bit test and branch here but
      // markoopDesc does not define a bit index just the bit value
      // so assert in case the bit pos changes
#     define __monitor_value_log2 1
      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
#     undef __monitor_value_log2
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);

    // Load Compare Value application register.

    // Initialize the box. (Must happen before we update the object mark!)
    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Compare object markOop with mark and if equal exchange scratch1
    // with object markOop.
    {
      Label retry_load;
      __ bind(retry_load);
      __ ldaxr(tmp, oop);
      __ cmp(tmp, disp_hdr);
      __ br(Assembler::NE, cas_failed);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, box, oop);
      // tmp == 0 means the exclusive store succeeded; otherwise retry
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    // Formerly:
    // __ cmpxchgptr(/*oldv=*/disp_hdr,
    //               /*newv=*/box,
    //               /*addr=*/oop,
    //               /*tmp=*/tmp,
    //               cont,
    //               /*fail*/NULL);

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object, will have now locked it will continue at label cont

    __ bind(cas_failed);
    // We did not see an unlocked object so try the fast recursive case.

    // Check if the owner is self by comparing the value in the
    // markOop of object (disp_hdr) with the stack pointer.
    __ mov(rscratch1, sp);
    __ sub(disp_hdr, disp_hdr, rscratch1);
    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
    // If condition is true we are cont and hence we can store 0 as the
    // displaced header in the box, which indicates that it is a recursive lock.
    __ ands(tmp/*==0?*/, disp_hdr, tmp);
    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      // The object's monitor m is unlocked iff m->owner == NULL,
      // otherwise m->owner may contain a thread or a stack address.
      //
      // Try to CAS m->owner from NULL to current thread.
      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
      __ mov(disp_hdr, zr);

      {
        Label retry_load, fail;
        __ bind(retry_load);
        __ ldaxr(rscratch1, tmp);
        __ cmp(disp_hdr, rscratch1);
        __ br(Assembler::NE, fail);
        // use stlxr to ensure update is immediately visible
        __ stlxr(rscratch1, rthread, tmp);
        __ cbnzw(rscratch1, retry_load);
        __ bind(fail);
      }

      // Label next;
      // __ cmpxchgptr(/*oldv=*/disp_hdr,
      //               /*newv=*/rthread,
      //               /*addr=*/tmp,
      //               /*tmp=*/rscratch1,
      //               /*succeed*/next,
      //               /*fail*/NULL);
      // __ bind(next);

      // store a non-null value into the box.
      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));

      // PPC port checks the following invariants
      // #ifdef ASSERT
      // bne(flag, cont);
      // We have acquired the monitor, check some invariants.
      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
      // Invariant 1: _recursions should be 0.
      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
      //                        "monitor->_recursions should be 0", -1);
      // Invariant 2: OwnerIsThread shouldn't be 0.
      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
      // #endif
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure

  %}
4905 
4906   // TODO
4907   // reimplement this with custom cmpxchgptr code
4908   // which avoids some of the unnecessary branching
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    // Fast-path monitor exit: recursive-unlock test, thin-lock CAS back
    // to the displaced header, then inflated-monitor release.
    // On exit: flag == EQ indicates success, flag == NE means the caller
    // must take the slow path into the runtime.
    MacroAssembler _masm(&cbuf);
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;
    Label cas_failed;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);


    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

      {
        Label retry_load;
        __ bind(retry_load);
        __ ldxr(tmp, oop);
        __ cmp(box, tmp);
        __ br(Assembler::NE, cas_failed);
        // use stlxr to ensure update is immediately visible
        __ stlxr(tmp, disp_hdr, oop);
        // tmp == 0 means the exclusive store succeeded; otherwise retry
        __ cbzw(tmp, cont);
        __ b(retry_load);
      }

    // __ cmpxchgptr(/*compare_value=*/box,
    //               /*exchange_value=*/disp_hdr,
    //               /*where=*/oop,
    //               /*result=*/tmp,
    //               cont,
    //               /*cas_failed*/NULL);
    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    __ bind(cas_failed);

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ b(cont);

      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      // cmp sets the NE flag seen after cont when cbnz is taken
      __ cmp(rscratch1, zr);
      __ cbnz(rscratch1, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(rscratch1, tmp); // rscratch1 is zero
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
4998 
4999 %}
5000 
5001 //----------FRAME--------------------------------------------------------------
5002 // Definition of frame structure and management information.
5003 //
5004 //  S T A C K   L A Y O U T    Allocators stack-slot number
5005 //                             |   (to get allocators register number
5006 //  G  Owned by    |        |  v    add OptoReg::stack0())
5007 //  r   CALLER     |        |
5008 //  o     |        +--------+      pad to even-align allocators stack-slot
5009 //  w     V        |  pad0  |        numbers; owned by CALLER
5010 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
5011 //  h     ^        |   in   |  5
5012 //        |        |  args  |  4   Holes in incoming args owned by SELF
5013 //  |     |        |        |  3
5014 //  |     |        +--------+
5015 //  V     |        | old out|      Empty on Intel, window on Sparc
5016 //        |    old |preserve|      Must be even aligned.
5017 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
5018 //        |        |   in   |  3   area for Intel ret address
5019 //     Owned by    |preserve|      Empty on Sparc.
5020 //       SELF      +--------+
5021 //        |        |  pad2  |  2   pad to align old SP
5022 //        |        +--------+  1
5023 //        |        | locks  |  0
5024 //        |        +--------+----> OptoReg::stack0(), even aligned
5025 //        |        |  pad1  | 11   pad to align new SP
5026 //        |        +--------+
5027 //        |        |        | 10
5028 //        |        | spills |  9   spills
5029 //        V        |        |  8   (pad0 slot for callee)
5030 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
5031 //        ^        |  out   |  7
5032 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
5033 //     Owned by    +--------+
5034 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
5035 //        |    new |preserve|      Must be even-aligned.
5036 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
5037 //        |        |        |
5038 //
5039 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
5040 //         known from SELF's arguments and the Java calling convention.
5041 //         Region 6-7 is determined per call site.
5042 // Note 2: If the calling convention leaves holes in the incoming argument
5043 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
5045 //         incoming area, as the Java calling convention is completely under
5046 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
5048 //         varargs C calling conventions.
5049 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
5050 //         even aligned with pad0 as needed.
5051 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
5052 //           (the latter is true on Intel but is it false on AArch64?)
5053 //         region 6-11 is even aligned; it may be padded out more so that
5054 //         the region from SP to FP meets the minimum stack alignment.
5055 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
5056 //         alignment.  Region 11, pad1, may be dynamically extended so that
5057 //         SP meets the minimum alignment.
5058 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  // NOTE(review): R31 is presumably the encoding of SP here (compiled
  // frames are SP-relative) -- confirm against the register definitions.
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return-value register pair, indexed by ideal reg type.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half of the pair; OptoReg::Bad marks 32-bit-wide values with
    // no high half.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                       // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
5162 
5163 //----------ATTRIBUTES---------------------------------------------------------
5164 //----------Operand Attributes-------------------------------------------------
// Declares the per-operand cost attribute; default value is 1.
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
5180 
5181 //----------OPERANDS-----------------------------------------------------------
5182 // Operand definitions must precede instruction definitions for correct parsing
5183 // in the ADLC because operands constitute user defined types which are used in
5184 // instruction definitions.
5185 
5186 //----------Simple Operands----------------------------------------------------
5187 
// Integer operands 32 bit
// 32 bit immediate -- any int constant
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant no larger than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xFF byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5341 
// Constant 63, a long shift-count mask value.
// NOTE(review): despite the immL_ prefix this operand matches ConI and
// reads get_int() -- presumably the shift count is an int constant at
// match time; confirm against the shift rules that use this operand.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant 255, a byte-mask value.
// NOTE(review): matches ConI / get_int() despite the immL_ name -- see
// immL_63 above; verify this is intentional.
operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5361 
// 64 bit constant 65535 (0xFFFF halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 4294967295 (0xFFFFFFFF word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5381 
// Long Immediate: mask of contiguous low-order ones, i.e. a value of the
// form 2^k - 1.  The top two bits are required to be clear so that the
// (value + 1) power-of-2 test below cannot overflow into the sign bit.
// (Literal suffix changed from lowercase 'l' to 'L': lowercase is easily
// misread as the digit '1'; the value is unchanged.)
operand immL_bitmask()
%{
  predicate(((n->get_long() & 0xc000000000000000L) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5392 
// Int Immediate: mask of contiguous low-order ones (2^k - 1); the top
// two bits must be clear so the (value + 1) power-of-2 test cannot
// overflow into the sign bit.
operand immI_bitmask()
%{
  predicate(((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5403 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- long-constant variant of immIU12
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores -- long variant
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5500 
// Integer operands 64 bit
// 64 bit immediate -- any long constant
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor
// (long constant equal to frame_anchor_offset + last_Java_pc_offset,
// i.e. the byte offset of the anchor's saved pc within JavaThread)

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5587 
// Pointer operands
// Pointer Immediate -- any pointer constant
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate -- address of the VM's polling page
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
// NOTE(review): the comments for -1 and -2 are identical; the distinct
// values presumably act as different markers -- confirm at the use sites.
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5669 
// Float and Double operands
// Double Immediate -- any double constant
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: encodable as an FP immediate (per
// Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate -- any float constant
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: encodable as an FP immediate (per
// Assembler::operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5730 
// Narrow pointer operands
// Narrow Pointer Immediate -- any compressed-oop constant
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Immediate -- compressed class pointer constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
5761 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its siblings this operand has no explicit
// op_cost(0), so it presumably takes the op_cost default of 1 --
// confirm the omission is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}
5804 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
// NOTE(review): fp_reg is presumably r29, the frame pointer -- confirm
// against the register class definitions.
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5943 
// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Register R4 only (comment previously said R2 -- copy-paste error;
// the register class below is int_r4_reg)
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
5988 
5989 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer 32 bit Register not Special
// (comment previously said "Integer 64 bit" -- this operand matches
// RegN and allocates from the 32-bit no-special class)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// heap base register -- used for encoding immN0

operand iRegIHeapbase()
%{
  constraint(ALLOC_IN_RC(heapbase_reg));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6022 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register, 64-bit (D) form
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Vector register, 128-bit (X) form
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V0 only
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V1 only
operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V2 only
operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register V3 only
operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6102 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
6142 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Method Oop Register for calls into the interpreter
operand interpreter_method_oop_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Link Register
operand lr_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
6184 
6185 //----------Memory Operands----------------------------------------------------
6186 
6187 operand indirect(iRegP reg)
6188 %{
6189   constraint(ALLOC_IN_RC(ptr_reg));
6190   match(reg);
6191   op_cost(0);
6192   format %{ "[$reg]" %}
6193   interface(MEMORY_INTER) %{
6194     base($reg);
6195     index(0xffffffff);
6196     scale(0x0);
6197     disp(0x0);
6198   %}
6199 %}
6200 
6201 operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
6202 %{
6203   constraint(ALLOC_IN_RC(ptr_reg));
6204   match(AddP (AddP reg (LShiftL lreg scale)) off);
6205   op_cost(INSN_COST);
6206   format %{ "$reg, $lreg lsl($scale), $off" %}
6207   interface(MEMORY_INTER) %{
6208     base($reg);
6209     index($lreg);
6210     scale($scale);
6211     disp($off);
6212   %}
6213 %}
6214 
6215 operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
6216 %{
6217   constraint(ALLOC_IN_RC(ptr_reg));
6218   match(AddP (AddP reg (LShiftL lreg scale)) off);
6219   op_cost(INSN_COST);
6220   format %{ "$reg, $lreg lsl($scale), $off" %}
6221   interface(MEMORY_INTER) %{
6222     base($reg);
6223     index($lreg);
6224     scale($scale);
6225     disp($off);
6226   %}
6227 %}
6228 
// Memory operand: base + sign-extended int index + unsigned 12-bit long offset.
operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// Memory operand: base + (sign-extended int index << scale) + unsigned
// 12-bit long offset.
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// Memory operand: base + (sign-extended int index << scale), no displacement.
// Maps directly onto the reg+extended-reg addressing mode, hence op_cost(0).
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}
6270 
// Memory operand: base + (long index << scale); maps onto the
// reg+shifted-reg addressing mode, hence op_cost(0).
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Memory operand: base + long index, no scale, no displacement.
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Memory operand: base + int immediate offset (reg+imm addressing mode).
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}

// Memory operand: base + long immediate offset (reg+imm addressing mode).
operand indOffL(iRegP reg, immLoffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6326 
6327 
// Indirect access through a narrow (compressed) oop. Only legal when
// narrow_oop_shift() == 0, i.e. when decoding a narrow oop is at most a
// base-register add, so the DecodeN can be folded into the address.
operand indirectN(iRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp(0x0);
  %}
%}
6342 
// Narrow-oop counterparts of the indexed memory operands above: the base is
// a DecodeN of a compressed oop, legal only when narrow_oop_shift() == 0.
// NOTE(review): op_cost(0) here differs from indIndexScaledOffsetLN below
// and from the non-narrow indIndexScaledOffsetI (both INSN_COST) — looks
// inconsistent; confirm whether this is intentional.
operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + (long index << scale) + unsigned 12-bit long offset.
operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + sign-extended int index + unsigned 12-bit long offset.
operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + (sign-extended int index << scale) + unsigned 12-bit offset.
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
  op_cost(INSN_COST);
  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// Narrow base + (sign-extended int index << scale), no displacement.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}
6417 
// Narrow base + (long index << scale), no displacement.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow base + long index, no scale, no displacement.
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow base + int immediate offset.
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}

// Narrow base + long immediate offset.
operand indOffLN(iRegN reg, immLoffset off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6477 
6478 
6479 
6480 // AArch64 opto stubs need to write to the pc slot in the thread anchor
// AArch64 opto stubs need to write to the pc slot in the thread anchor:
// address = thread register + fixed offset of the anchor pc field.
operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // no index register
    scale(0x0);
    disp($off);
  %}
%}
6494 
6495 //----------Special Memory Operands--------------------------------------------
6496 // Stack Slot Operand - This operand is used for loading and storing temporary
6497 //                      values on the stack where a match requires a value to
6498 //                      flow through memory.
// Pointer-sized stack slot. The sReg encodes a stack offset; the slot is
// addressed relative to SP (matcher register encoding 0x1e).
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Int-sized stack slot, SP-relative.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Float-sized stack slot, SP-relative.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Double-sized stack slot, SP-relative.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Long-sized stack slot, SP-relative.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
6569 
6570 // Operands for expressing Control Flow
6571 // NOTE: Label is a predefined operand which should not be redefined in
6572 //       the AD file. It is generically handled within the ADLC.
6573 
6574 //----------Conditional Branch Operands----------------------------------------
6575 // Comparison Op  - This is the operation of the comparison, and is limited to
6576 //                  the following set of codes:
6577 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
6578 //
6579 // Other attributes of the comparison, such as unsignedness, are specified
6580 // by the comparison instruction that sets a condition code flags register.
6581 // That result is represented by a flags operand whose subtype is appropriate
6582 // to the unsignedness (etc.) of the comparison.
6583 //
6584 // Later, the instruction which matches both the Comparison Op (a Bool) and
6585 // the flags (produced by the Cmp) specifies the coding of the comparison op
6586 // by matching a specific subtype of Bool operand below, such as cmpOpU.
6587 
6588 // used for signed integral comparisons and fp comparisons
6589 
// Comparison operand for signed integral and fp comparisons. The hex values
// are the AArch64 condition-code encodings (eq=0x0, ne=0x1, lt=0xb, ge=0xa,
// le=0xd, gt=0xc, vs=0x6, vc=0x7).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// Comparison operand for unsigned integral comparisons: same eq/ne/overflow
// encodings, but the ordered relations use the unsigned condition codes
// (lo=0x3, hs=0x2, ls=0x9, hi=0x8).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
6625 
6626 // Special operand allowing long args to int ops to be truncated for free
6627 
// Special operand allowing long args to int ops to be truncated for free:
// matches a ConvL2I of a long register as a plain register operand, so the
// 32-bit consumer just reads the low half and no explicit l2i is emitted.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
6638 
// vmem: the addressing forms usable by vector loads/stores — base only,
// base+index and base+immediate-offset (no scaled-index forms).
opclass vmem(indirect, indIndex, indOffI, indOffL);
6640 
6641 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
6643 // instruction definitions by not requiring the AD writer to specify
6644 // separate instructions for every form of operand when the
6645 // instruction accepts multiple operand types with the same basic
6646 // encoding and format. The classic case of this is memory operands.
6647 
6648 // memory is used to define read/write location for load/store
6649 // instruction defs. we can turn a memory op into an Address
6650 
// All plain-pointer addressing forms followed by their narrow-oop (N) twins.
opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
6653 
6654 
6655 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
6656 // operations. it allows the src to be either an iRegI or a (ConvL2I
6657 // iRegL). in the latter case the l2i normally planted for a ConvL2I
6658 // can be elided because the 32-bit instruction will just employ the
6659 // lower 32 bits anyway.
6660 //
6661 // n.b. this does not elide all L2I conversions. if the truncated
6662 // value is consumed by more than one operation then the ConvL2I
6663 // cannot be bundled into the consuming nodes so an l2i gets planted
6664 // (actually a movw $dst $src) and the downstream instructions consume
6665 // the result of the l2i as an iRegI input. That's a shame since the
6666 // movw is actually redundant but its not too costly.
6667 
// Accept either a plain int register or a free long-to-int truncation.
opclass iRegIorL2I(iRegI, iRegL2I);
6669 
6670 //----------PIPELINE-----------------------------------------------------------
6671 // Rules which define the behavior of the target architectures pipeline.
6672 
6673 // For specific pipelines, eg A53, define the stages of that pipeline
6674 //pipe_desc(ISS, EX1, EX2, WR);
// Map the A53 stage names used by the pipe classes below onto the first
// four generic stages declared in pipe_desc(S0..S5).
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
6679 
6680 // Integer ALU reg operation
6681 pipeline %{
6682 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // all AArch64 instructions are 4 bytes
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
6695 
6696 // We don't use an actual pipeline model so don't care about resources
6697 // or description. we do use pipeline classes to introduce fixed
6698 // latencies
6699 
6700 //----------RESOURCES----------------------------------------------------------
6701 // Resources are the functional units available to the machine
6702 
// INS0/INS1 model the two issue slots (INS01 = either slot); ALU0/ALU1 the
// two integer ALUs; MAC the multiply-accumulate unit; DIV the divider;
// LDST the load/store unit; NEON_FP the SIMD/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);
6710 
6711 //----------PIPELINE DESCRIPTION-----------------------------------------------
6712 // Pipeline Description specifies the stages in the machine's pipeline
6713 
// Define the pipeline as a generic 6 stage pipeline; the #defines above
// alias ISS/EX1/EX2/WR onto S0..S3.
pipe_desc(S0, S1, S2, S3, S4, S5);
6716 
6717 //----------PIPELINE CLASSES---------------------------------------------------
6718 // Pipeline Classes describe the stages in which input and output are
6719 // referenced by the hardware pipeline.
6720 
// FP dyadic op, single precision: sources read in S1/S2, result in S5.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision.
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> float.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> double.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> int (result in a GP register).
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert float -> long (result in a GP register).
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6794 
// FP convert int -> float (source from a GP register).
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> float.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> int.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert double -> long.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert int -> double.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP convert long -> double.
// NOTE(review): src is declared iRegIorL2I while fp_l2f uses iRegL —
// confirm whether this asymmetry is intentional (types here are descriptive).
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
6848 
// FP divide, single precision; INS0 only (cannot dual issue as slot 1).
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; INS0 only.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}
6868 
// FP conditional select, single precision: reads flags plus both sources.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move-immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP constant load (from constant pool), single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// FP constant load, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
6922 
// Vector multiply, 64-bit; can issue in either slot.
pipe_class vmul64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply, 128-bit; INS0 only (128-bit ops occupy both NEON lanes).
pipe_class vmul128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 64-bit; dst is also read (accumulator).
pipe_class vmla64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector multiply-accumulate, 128-bit; INS0 only.
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector integer dyadic op (add/sub etc.), 64-bit.
pipe_class vdop64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Vector integer dyadic op, 128-bit; INS0 only.
pipe_class vdop128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S4(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S4;
%}

// Vector logical op (and/or/eor), 64-bit.
pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector logical op, 128-bit; INS0 only.
pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S3(write);
  src1   : S2(read);
  src2   : S2(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
7004 
// Vector shift by register, 64-bit data (shift amounts in a vector reg).
pipe_class vshift64(vecD dst, vecD src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by register, 128-bit data; INS0 only.
pipe_class vshift128(vecX dst, vecX src, vecX shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  shift  : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 64-bit data.
pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector shift by immediate, 128-bit data; INS0 only.
pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S3;
%}
7042 
// Vector FP dyadic op, 64-bit.
pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP dyadic op, 128-bit; INS0 only.
pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 64-bit; INS0 only.
pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP multiply/divide, 128-bit; INS0 only.
pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP square root, 128-bit; INS0 only. (No 64-bit class defined here.)
pipe_class vsqrt_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 64-bit.
pipe_class vunop_fp64(vecD dst, vecD src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S5;
%}

// Vector FP unary op, 128-bit; INS0 only.
pipe_class vunop_fp128(vecX dst, vecX src)
%{
  single_instruction;
  dst    : S5(write);
  src    : S1(read);
  INS0   : ISS;
  NEON_FP : S5;
%}
7109 
// Duplicate a GP register across all lanes of a 64-bit vector.
pipe_class vdup_reg_reg64(vecD dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a GP register across all lanes of a 128-bit vector.
pipe_class vdup_reg_reg128(vecX dst, iRegI src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register across a 64-bit vector.
pipe_class vdup_reg_freg64(vecD dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a float register across a 128-bit vector.
pipe_class vdup_reg_freg128(vecX dst, vRegF src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Duplicate a double register across a 128-bit vector.
pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
%{
  single_instruction;
  dst    : S3(write);
  src    : S1(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 64-bit.
pipe_class vmovi_reg_imm64(vecD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector move-immediate, 128-bit; INS0 only.
pipe_class vmovi_reg_imm128(vecX dst)
%{
  single_instruction;
  dst    : S3(write);
  INS0   : ISS;
  NEON_FP : S3;
%}
7170 
// Vector load, 64-bit: address needed at issue, result in S5.
pipe_class vload_reg_mem64(vecD dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector load, 128-bit.
pipe_class vload_reg_mem128(vecX dst, vmem mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : ISS(read);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Vector store, 64-bit: address at issue, data read in S2.
pipe_class vstore_reg_mem64(vecD src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7197 
// Vector store, 128-bit: address at issue, data read in S2.
// Fix: the source was declared vecD (a 64-bit vector) — every other 128-bit
// pipe class here (vload_reg_mem128, vmul128, ...) uses vecX, so declare the
// stored operand as vecX to match the instructions this class describes.
pipe_class vstore_reg_mem128(vecX src, vmem mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
7206 
7207 //------- Integer ALU operations --------------------------
7208 
7209 // Integer ALU reg-reg operation
7210 // Operands needed in EX1, result generated in EX2
7211 // Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);   // shifted operand needed early, at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);   // shifted operand needed early, at issue
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): ALU is reserved at EX1 here although dst is written in EX2 —
// confirm whether ALU : EX2 was intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
7304 
7305 //------- Compare operation -------------------------------
7306 
7307 // Compare reg-reg
7308 // Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);  // flags produced in EX2
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
7331 
7332 //------- Conditional instructions ------------------------
7333 
7334 // Conditional no operands
7335 // Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);   // flags consumed in EX1
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand (second input implicit, e.g. zr)
// EG.  CSEL    X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
7369 
7370 //------- Multiply pipeline operations --------------------
7371 
7372 // Multiply reg-reg
7373 // Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);   // MAC pipe reads operands at issue
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// 64-bit multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
7422 
7423 //------- Divide pipeline operations --------------------
7424 
7425 // Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
7448 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
// src is the index register used to form the address.
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
7482 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
// NOTE(review): "dst" here is the address-forming index register and is
// read, not written -- the name is historical.
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
7516 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
7545 
//------- Synchronisation operations ----------------------

// Any operation requiring serialization.
// EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}

// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
7569 
// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}
7604 
// Define the class for the Nop node: nops use the zero-latency empty class.
define %{
   MachNop = pipe_class_empty;
%}
7609 
7610 %}
7611 //----------INSTRUCTIONS-------------------------------------------------------
7612 //
7613 // match      -- States which machine-independent subtree may be replaced
7614 //               by this instruction.
7615 // ins_cost   -- The estimated cost of this instruction is used by instruction
7616 //               selection to identify a minimum cost tree of machine
7617 //               instructions that matches a tree of machine-independent
7618 //               instructions.
7619 // format     -- A string providing the disassembly for this instruction.
7620 //               The value of an instruction's operand may be inserted
7621 //               by referring to it with a '$' prefix.
7622 // opcode     -- Three instruction opcodes may be provided.  These are referred
7623 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
7625 //               indicate the type of machine instruction, while secondary
7626 //               and tertiary are often used for prefix options or addressing
7627 //               modes.
7628 // ins_encode -- A list of encode classes with parameters. The encode class
7629 //               name must have been defined in an 'enc_class' specification
7630 //               in the encode section of the architecture description.
7631 
7632 // ============================================================================
7633 // Memory (Load/Store) Instructions
7634 
7635 // Load Instructions
7636 
// Non-acquiring byte loads: the predicate rejects nodes that need acquire
// semantics -- those are matched by the _volatile variants further down.

// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7692 
// Non-acquiring 16-bit loads.  The 2L variants fold the ConvI2L into a
// single sign- or zero-extending load.

// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7748 
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// The AndL with the 0xffffffff mask is subsumed by ldrw's implicit
// zero-extension, so the whole subtree collapses to one load.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7790 
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Disassembly annotation corrected from "# int": this is a 64-bit load.
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7804 
// Load Range
// Array-length load; never needs acquire semantics, hence no predicate.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadN mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
7873 
// Load Float
instruct loadF(vRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}

// Load Double
instruct loadD(vRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
7901 
7902 
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
7928 
// Load Pointer Constant
// General case; may expand to a multi-instruction materialisation,
// hence the higher cost than the specialised forms below it in the file.

instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant

instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# NULL ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
7958 
// Load Pointer Constant One

instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // The constant is pointer value 1 (a marker value), not NULL; the
  // previous annotation "# NULL ptr" was a copy-paste from loadConP0.
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
7972 
// Load Poll Page Constant
// Materialised PC-relative (adr), hence single-instruction cost.

instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}

  ins_encode(aarch64_enc_mov_poll_page(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant

instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
8000 
// Load Narrow Pointer Constant

instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant

instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant

instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
8042 
// Load Packed Float Constant
// "Packed" constants are the subset representable as an fmov immediate;
// other float/double constants come from the constant table instead.

instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // fmovs takes the immediate as a double; the cast is deliberate.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}

// Load Float Constant

instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Packed Double Constant

instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}

// Load Double Constant

instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
8103 
8104 // Store Instructions
8105 
// Store CMS card-mark Immediate
// Matched first when the storestore barrier can be elided (predicate);
// otherwise the _ordered variant below applies.
instruct storeimmCM0(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));
  predicate(unnecessary_storestore(n));

  ins_cost(INSN_COST);
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}

// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
  match(Set mem (StoreCM mem zero));

  ins_cost(INSN_COST * 2);
  format %{ "dmb ishst"
      "\n\tstrb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0_ordered(mem));

  ins_pipe(istore_mem);
%}
8134 
// Store Byte
// Non-releasing form; releasing stores are matched by storeB_volatile.
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
8148 
8149 
// Store zero Byte
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // aarch64_enc_strb0 stores the zero register; the format previously
  // showed a misspelled scratch register ("rscractch2").
  format %{ "strb zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb0(mem));

  ins_pipe(istore_mem);
%}
8162 
// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Char/Short
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
8189 
// Store Integer

instruct storeI(iRegIorL2I src, memory mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store zero Integer
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
8217 
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Disassembly annotation corrected from "# int": this is a 64-bit store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
8231 
// Store zero Long (64 bit)
// NOTE(review): the "# int" annotation in the format below is stale --
// this is a 64-bit store.
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store Null Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}

// Store compressed null: when both narrow bases are zero the heapbase
// register holds 0 and can be stored directly, saving a constant load.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
8302 
// Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
8347 
8348 // TODO
8349 // implement storeImmD0 and storeDImmPacked
8350 
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchalloc( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
8364 
8365 //  ---------------- volatile loads and stores ----------------
8366 
// Acquiring (volatile) loads: no predicate needed because these are
// costed higher than the plain forms and only win when the plain form's
// !needs_acquiring_load predicate rejects the node.

// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
8456 
// Load Short (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Format corrected to match the encoding (aarch64_enc_ldarsh): this is
  // the sign-extending load-acquire halfword, not plain "ldarh".
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
8469 
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Integer (32 bit unsigned) into long
// ldarw zero-extends, so the AndL mask is subsumed by the load.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}

// Load Float
// FP load-acquire goes via an integer ldar plus fmov in the encoding.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}

// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
8560 
// Releasing (volatile) stores, using store-release (stlr*) encodings.
// NOTE(review): these use pipe_class_memory whereas the volatile loads
// use pipe_serial -- presumably intentional; confirm against the
// pipeline model if touching scheduling.

// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Integer

instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}

// TODO
// implement storeImmF0 and storeFImmPacked

// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
8668 
8669 //  ---------------- end of volatile loads and stores ----------------
8670 
8671 // ============================================================================
8672 // BSWAP Instructions
8673 
// Reverse the byte order of a 32-bit int (Integer.reverseBytes).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of a 64-bit long (Long.reverseBytes).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of an unsigned 16-bit value (Character.reverseBytes);
// rev16w swaps bytes within each halfword, no extension needed.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Reverse the byte order of a signed 16-bit value (Short.reverseBytes):
// rev16w swaps the bytes, then sbfmw over bits 0..15 sign-extends the
// result back to a proper int-width short.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
8727 
8728 // ============================================================================
8729 // Zero Count Instructions
8730 
// Integer.numberOfLeadingZeros: single clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long.numberOfLeadingZeros: single clz instruction (int result).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Integer.numberOfTrailingZeros: AArch64 has no ctz, so reverse the
// bits (rbitw) and count leading zeros of the result.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long.numberOfTrailingZeros: rbit + clz, as above but 64-bit.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8782 
8783 //---------- Population Count Instructions -------------------------------------
8784 //
8785 
// Integer.bitCount: AArch64 has no integer popcount, so the value is
// moved to a SIMD register, cnt computes a per-byte popcount, addv
// sums the byte counts horizontally, and the result moves back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Integer.bitCount of a loaded int: load straight into the SIMD
// register (ldrs) instead of going through a general register, then
// cnt/addv as above.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Note: Long.bitCount(long) returns an int.
// Same SIMD cnt/addv sequence as popCountI but with a full 64-bit
// source, so no upper-half zeroing is needed.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}

// Long.bitCount of a loaded long: ldrd straight into the SIMD
// register, then cnt/addv as above.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
8872 
8873 // ============================================================================
8874 // MemBar Instruction
8875 
// LoadFence: prevents loads from being reordered past subsequent
// loads and stores (dmb ishld semantics via the membar call).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarAcquire elided: the unnecessary_acquire(n) predicate has
// proved the barrier redundant, so only a block comment is emitted.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}

// MemBarAcquire: LoadLoad|LoadStore barrier (acquire semantics).
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}


// MemBarAcquireLock: always elided here — only a block comment is
// emitted (the lock acquisition code supplies the needed ordering).
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// StoreFence: prevents stores from being reordered past subsequent
// stores and prior loads from passing the fence.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease elided: the unnecessary_release(n) predicate has
// proved the barrier redundant.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}

// MemBarRelease: LoadStore|StoreStore barrier (release semantics).
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarStoreStore: store-store ordering only.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// MemBarReleaseLock: always elided, like membar_acquire_lock.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile elided: the unnecessary_volatile(n) predicate has
// proved the full barrier redundant.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// MemBarVolatile: full StoreLoad barrier. The inflated cost
// (VOLATILE_REF_COST*100) strongly discourages the matcher from
// selecting this over the elided form when the predicate applies.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
9020 
9021 // ============================================================================
9022 // Cast/Convert Instructions
9023 
// CastX2P: reinterpret a long as a pointer. Emits a register move
// only when source and destination allocations differ.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// CastP2X: reinterpret a pointer as a long; move elided when the
// register allocator assigned the same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// Convert oop into int for vectors alignment masking
// movw keeps the low 32 bits and zeroes the upper half.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9066 
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
// Only valid when narrow oops are unshifted, so the raw 32-bit
// compressed value equals the low bits of the decoded address.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Fixed format string: "$dst" was written as bare "dst" (so the
  // register name never appeared in disassembly output) and the
  // mnemonic now matches the movw actually emitted.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
9082 
9083 
9084 // Convert oop pointer into compressed form
// Compress an oop that may be null; encode_heap_oop handles the
// null check, hence the flags are clobbered (KILL cr).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Compress an oop statically known to be non-null; no null check
// needed. NOTE(review): cr is declared but there is no effect(KILL cr)
// here — presumably encode_heap_oop_not_null leaves flags intact;
// confirm against the MacroAssembler implementation.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop that may be null (not provably NotNull or
// Constant). NOTE(review): cr declared without effect(KILL cr) — see
// note on encodeHeapOop_not_null.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop known to be non-null (or a constant), so
// the null check can be skipped.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
9137 
9138 // n.b. AArch64 implementations of encode_klass_not_null and
9139 // decode_klass_not_null do not modify the flags register so, unlike
9140 // Intel, we don't kill CR as a side effect here
9141 
// Compress a klass pointer (always non-null); flags unaffected per
// the note above this block.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}

// Decompress a narrow klass pointer; the one-register form of
// decode_klass_not_null is used for the in-place (dst == src) case.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}

// CheckCastPP is a type-system-only node: no code is emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastPP is likewise a no-op at the machine level.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

// CastII: zero-size, zero-cost type adjustment; no code emitted.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
9206 
9207 // ============================================================================
9208 // Atomic operation instructions
9209 //
9210 // Intel and SPARC both implement Ideal Node LoadPLocked and
9211 // Store{PIL}Conditional instructions using a normal load for the
9212 // LoadPLocked and a CAS for the Store{PIL}Conditional.
9213 //
9214 // The ideal code appears only to use LoadPLocked/StorePLocked as a
9215 // pair to lock object allocations from Eden space when not using
9216 // TLABs.
9217 //
9218 // There does not appear to be a Load{IL}Locked Ideal Node and the
9219 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
9220 // and to use StoreIConditional only for 32-bit and StoreLConditional
9221 // only for 64-bit.
9222 //
9223 // We implement LoadPLocked and StorePLocked instructions using,
9224 // respectively the AArch64 hw load-exclusive and store-conditional
9225 // instructions. Whereas we must implement each of
9226 // Store{IL}Conditional using a CAS which employs a pair of
9227 // instructions comprising a load-exclusive followed by a
9228 // store-conditional.
9229 
9230 
9231 // Locked-load (linked load) of the current heap-top
9232 // used when updating the eden heap top
9233 // implemented using ldaxr on AArch64
9234 
// LoadPLocked: load-exclusive-acquire (ldaxr) of the heap-top pointer;
// pairs with storePConditional below.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}

// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flag (EQ) on success.
// implemented using stlxr on AArch64.

// Store-conditional-release via stlxr; the encoding also compares the
// status register against zr so flags report success (EQ).
// Note: oldval is carried by the Binary input but the exclusive pair
// (ldaxr/stlxr) provides the compare implicitly, so only newval and
// the address feed the encoding.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}


// storeLConditional is used by PhaseMacroExpand::expand_lock_node
// when attempting to rebias a lock towards the current thread.  We
// must use the acquire form of cmpxchg in order to guarantee acquire
// semantics in this case.

// 64-bit conditional store implemented as an acquiring CAS; flags
// set to EQ on success via the rscratch1 compare in the encoding.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}

// storeIConditional also has acquire semantics, for no better reason
// than matching storeLConditional.  At the time of writing this
// comment storeIConditional was not used anywhere by AArch64.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
9312 
9313 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
9314 // can't match them
9315 
9316 // standard CompareAndSwapX when we are using barriers
9317 // these have higher priority than the rules selected by a predicate
9318 
// CAS int with full barriers; res <- 1 on success, 0 on failure
// (cset on the EQ flag produced by the cmpxchg encoding).
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS long with full barriers; result flag materialized as 0/1.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS pointer with full barriers; 64-bit cmpxchg.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// CAS narrow oop with full barriers; 32-bit cmpxchgw.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// alternative CompareAndSwapX when we are eliding barriers

// Acquiring CAS int, selected (lower cost) when the predicate proves
// the surrounding barriers can be folded into the ld[a]xr/stlxr pair.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS long (barrier-eliding variant).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS pointer (barrier-eliding variant).
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

// Acquiring CAS narrow oop (barrier-eliding variant).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
9468 
9469 
// GetAndSetI: atomic 32-bit exchange; prev receives the old value.
instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// GetAndSetL: atomic 64-bit exchange.
instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
  match(Set prev (GetAndSetL mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// GetAndSetN: atomic exchange of a 32-bit narrow oop.
instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetN mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// GetAndSetP: atomic exchange of a 64-bit pointer.
instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
  match(Set prev (GetAndSetP mem newv));
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9505 
9506 
// GetAndAddL: atomic 64-bit fetch-and-add with a register increment;
// newval receives the value previously in memory.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// As above but the fetched value is unused (result_not_used), so
// noreg is passed and the cost is slightly lower to prefer this form.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// GetAndAddL with an immediate increment (valid add/sub immediate).
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// Immediate-increment form with unused result.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// GetAndAddI: atomic 32-bit fetch-and-add with a register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit register-increment form with unused result.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// GetAndAddI with an immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(INSN_COST * 10);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

// 32-bit immediate-increment form with unused result.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(INSN_COST * 9);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9590 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// cmp sets flags; csetw makes dst 1 if not equal; cnegw negates dst
// to -1 when the comparison was signed-less-than.
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  // format %{ "CmpL3 $dst, $src1, $src2" %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// CmpL3 against an immediate. A negative immediate is handled by
// adding its negation (adds) rather than subtracting, since only
// non-negative add/sub immediates encode directly; immLAddSub limits
// the constant to the encodable range.
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2"
      "csetw $dst, ne"
      "cnegw $dst, lt"
  %}
  ins_encode %{
    int32_t con = (int32_t)$src2$$constant;
     if (con < 0) {
      __ adds(zr, $src1$$Register, -con);
    } else {
      __ subs(zr, $src1$$Register, con);
    }
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
9638 
9639 // ============================================================================
9640 // Conditional Move Instructions
9641 
9642 // n.b. we have identical rules for both a signed compare op (cmpOp)
9643 // and an unsigned compare op (cmpOpU). it would be nice if we could
9644 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
9650 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
9651 
// Conditional move, int, signed compare: cselw gives
// dst = cmp ? src2 : src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9667 
// Conditional move, int, unsigned compare flavour of the rule above.
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9683 
9684 // special cases where one arg is zero
9685 
9686 // n.b. this is selected in preference to the rule above because it
9687 // avoids loading constant 0 into a source register
9688 
9689 // TODO
9690 // we ought only to be able to cull one of these variants as the ideal
9691 // transforms ought always to order the zero consistently (to left/right?)
9692 
// Zero on the selected-when-false side: dst = cmp ? src : 0, using zr
// instead of loading a zero constant.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9708 
// Unsigned flavour of cmovI_zero_reg: dst = cmp ? src : 0.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9724 
// Zero on the selected-when-true side: dst = cmp ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9740 
// Unsigned flavour of cmovI_reg_zero: dst = cmp ? 0 : src.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9756 
9757 // special case for creating a boolean 0 or 1
9758 
9759 // n.b. this is selected in preference to the rule above because it
9760 // avoids loading constants 0 and 1 into a source register
9761 
// Boolean materialization: csincw dst, zr, zr, cond yields
// dst = cmp ? 0 : 1, so no constant loads are needed.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9780 
// Unsigned flavour of cmovI_reg_zero_one: dst = cmp ? 0 : 1.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
9799 
// Conditional move, long, signed compare: dst = cmp ? src2 : src1.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9815 
// Conditional move, long, unsigned compare flavour.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9831 
9832 // special cases where one arg is zero
9833 
// Long cmove with zero on the true side: dst = cmp ? 0 : src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9849 
// Unsigned flavour of cmovL_reg_zero: dst = cmp ? 0 : src.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9865 
// Long cmove with zero on the false side: dst = cmp ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9881 
// Unsigned flavour of cmovL_zero_reg: dst = cmp ? src : 0.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9897 
// Conditional move, pointer, signed compare: dst = cmp ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9913 
// Conditional move, pointer, unsigned compare flavour.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
9929 
9930 // special cases where one arg is zero
9931 
// Pointer cmove with null on the true side: dst = cmp ? 0 : src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9947 
// Unsigned flavour of cmovP_reg_zero: dst = cmp ? 0 : src.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9963 
// Pointer cmove with null on the false side: dst = cmp ? src : 0.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9979 
// Unsigned flavour of cmovP_zero_reg: dst = cmp ? src : 0.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
9995 
// Conditional move, compressed (narrow) ptr, signed compare:
// dst = cmp ? src2 : src1 — 32-bit cselw since narrow oops are 32-bit.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10011 
// Conditional move, compressed (narrow) ptr, unsigned compare flavour:
// dst = cmp ? src2 : src1.
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  // n.b. this is the cmpOpU rule, so the disassembly comment says
  // "unsigned" (the sibling signed rule above says "signed").
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
10027 
10028 // special cases where one arg is zero
10029 
// Narrow-ptr cmove with null on the true side: dst = cmp ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10045 
// Unsigned flavour of cmovN_reg_zero: dst = cmp ? 0 : src.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10061 
// Narrow-ptr cmove with null on the false side: dst = cmp ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10077 
// Unsigned flavour of cmovN_zero_reg: dst = cmp ? src : 0.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
10093 
// Conditional move, float, signed compare: fcsels selects src2 when the
// condition holds, else src1 (operands are swapped in the emission
// relative to the format string's source order).
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10111 
// Conditional move, float, unsigned compare flavour.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
10129 
// Conditional move, double, signed compare: fcseld selects src2 when
// the condition holds, else src1.
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // This is the CMoveD rule, so the disassembly comment says "double"
  // (it previously said "float", copied from the cmovF rule).
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10147 
// Conditional move, double, unsigned compare flavour.
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
%{
  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  // CMoveD rule: the disassembly comment says "double" (it previously
  // said "float", copied from the cmovUF rule).
  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcseld(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_d);
%}
10165 
10166 // ============================================================================
10167 // Arithmetic Instructions
10168 //
10169 
10170 // Integer Addition
10171 
10172 // TODO
10173 // these currently employ operations which do not set CR and hence are
10174 // not flagged as killing CR but we would like to isolate the cases
10175 // where we want to set flags from those where we don't. need to work
10176 // out how to do that.
10177 
// Integer add, register-register: addw dst, src1, src2.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10192 
// Integer add with an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10206 
// Integer add of an immediate to the low word of a long (ConvL2I is
// free: just use the 32-bit form of the add).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10220 
10221 // Pointer Addition
// Pointer add, register-register.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10236 
// Pointer add of a sign-extended int offset, folding the ConvI2L into
// the add's sxtw extend operand.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
10251 
// Pointer add of a shifted long index, folded into a single lea with a
// scaled (lsl) register offset.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10266 
// Pointer add of a sign-extended, scaled int index, folded into a
// single lea with an sxtw-extended scaled register offset.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10281 
// Sign-extend-then-shift collapsed into a single sbfiz (signed
// bit-field insert in zeros).
// NOTE(review): the cr operand is declared but not referenced by the
// encoding and there is no effect clause — confirm it is intentional.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
10296 
10297 // Pointer Immediate Addition
10298 // n.b. this needs to be more expensive than using an indirect memory
10299 // operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10313 
10314 // Long Addition
// Long add, register-register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10330 
// Long Immediate Addition. No constant pool entries required.
// Long add with an add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10345 
10346 // Integer Subtraction
// Integer subtract, register-register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10361 
10362 // Immediate Subtraction
// Integer subtract with an add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10376 
10377 // Long Subtraction
// Long subtract, register-register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10393 
// Long Immediate Subtraction. No constant pool entries required.
// Long subtract with an add/sub-encodable immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // fixed missing space after the mnemonic ("sub$dst" -> "sub $dst")
  format %{ "sub $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10408 
10409 // Integer Negation (special case for sub)
10410 
// Integer negate: SubI 0, src matched to a single negw.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10424 
10425 // Long Negation
10426 
// Long negate: SubL 0, src matched to a single neg.
// NOTE(review): src is typed iRegIorL2I although this is a long rule
// (cf. subL_reg_reg which uses iRegL) — confirm this is intentional.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10440 
10441 // Integer Multiply
10442 
// Integer multiply (32-bit mulw).
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10457 
// Long product of two sign-extended ints: both ConvI2L nodes are folded
// into a single smull (32x32 -> 64 signed multiply).
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10472 
10473 // Long Multiply
10474 
// Long multiply (64-bit mul).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10489 
// High 64 bits of a 64x64 signed multiply (smulh).
// NOTE(review): the cr operand is declared but the encoding does not
// use it and there is no KILL effect — confirm it is needed.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10505 
10506 // Combined Integer Multiply & Add/Sub
10507 
// Fused int multiply-add: dst = src3 + src1 * src2 (single maddw).
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // format now names the 32-bit mnemonic actually emitted (maddw)
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10523 
// Fused int multiply-subtract: dst = src3 - src1 * src2 (single msubw).
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // format now names the 32-bit mnemonic actually emitted (msubw)
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10539 
10540 // Combined Long Multiply & Add/Sub
10541 
// Fused long multiply-add: dst = src3 + src1 * src2 (single madd).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10557 
// Fused long multiply-subtract: dst = src3 - src1 * src2 (single msub).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10573 
10574 // Integer Divide
10575 
// Integer divide (sdivw via shared encoding class).
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10585 
// (src1 >> 31) >>> 31 extracts the sign bit (0 or 1); collapse the pair
// of shifts into a single logical shift right by 31.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
10595 
// src + sign-bit-of-src, folded into one addw with a shifted operand
// (src, LSR #31) — the round-toward-zero adjustment for int division
// by a power of two.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
10609 
10610 // Long Divide
10611 
// Long divide (sdiv via shared encoding class).
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10621 
// Long variant of signExtract: (src1 >> 63) >>> 63 extracts the sign
// bit; collapse into a single lsr by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
10631 
// Long rounding adjustment for signed divide by a power of two:
// dst = src + (src >>> 63), i.e. add 1 when src is negative.  Matched as
// src + ((src >> 63) >>> 63) and emitted as one add-with-shifted-register.
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
  ins_cost(INSN_COST);
  // Fixed format string: the shifted second source operand was missing.
  // The instruction actually emitted is "add dst, src, src, LSR 63".
  format %{ "add $dst, $src, $src, LSR $div1" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 63);
  %}
  ins_pipe(ialu_reg);
%}
10645 
10646 // Integer Remainder
10647 
// Signed 32-bit remainder: dst = src1 % src2.  Expands (via the
// aarch64_enc_modw encoding class, defined elsewhere in this file) to a
// divide followed by a multiply-subtract.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  // Fixed format string: the old text read "msubw($dst, ..." with a stray
  // '(' and no closing ')', garbling PrintOptoAssembly output.
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10658 
10659 // Long Remainder
10660 
// Signed 64-bit remainder: dst = src1 % src2.  Expands (via the
// aarch64_enc_mod encoding class, defined elsewhere in this file) to a
// divide followed by a multiply-subtract.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  // Fixed format string: the old text read "msub($dst, ..." with a stray
  // '(' and no closing ')'.  Also use "\n\t" for the line break, matching
  // the modI format above.
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10671 
10672 // Integer Shifts
10673 
10674 // Shift Left Register
// int << int (variable shift).  LSLV masks the shift count to 5 bits in
// hardware, matching Java int shift semantics.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10689 
10690 // Shift Left Immediate
// int << constant.  The shift count is masked to 5 bits (& 0x1f), matching
// Java int shift semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10705 
10706 // Shift Right Logical Register
// int >>> int (variable shift).  LSRV masks the shift count to 5 bits in
// hardware, matching Java int shift semantics.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10721 
10722 // Shift Right Logical Immediate
// int >>> constant.  Shift count masked to 5 bits (& 0x1f).
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10737 
10738 // Shift Right Arithmetic Register
// int >> int (variable arithmetic shift).  ASRV masks the shift count to
// 5 bits in hardware, matching Java int shift semantics.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10753 
10754 // Shift Right Arithmetic Immediate
// int >> constant (arithmetic).  Shift count masked to 5 bits (& 0x1f).
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10769 
10770 // Combined Int Mask and Right Shift (using UBFM)
10771 // TODO
10772 
10773 // Long Shifts
10774 
10775 // Shift Left Register
// long << int (variable shift).  LSLV masks the shift count to 6 bits in
// hardware, matching Java long shift semantics.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10790 
10791 // Shift Left Immediate
// long << constant.  Shift count masked to 6 bits (& 0x3f).
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10806 
10807 // Shift Right Logical Register
// long >>> int (variable shift).  LSRV masks the shift count to 6 bits in
// hardware, matching Java long shift semantics.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10822 
10823 // Shift Right Logical Immediate
// long >>> constant.  Shift count masked to 6 bits (& 0x3f).
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10838 
10839 // A special-case pattern for card table stores.
// Unsigned right shift of a pointer reinterpreted as a long (CastP2X).
// Matched so the pointer-to-long cast costs nothing; presumably used for
// card-table index computation (see the comment above) — the shift amount
// would then be the card-size log2.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10854 
10855 // Shift Right Arithmetic Register
// long >> int (variable arithmetic shift).  ASRV masks the shift count to
// 6 bits in hardware, matching Java long shift semantics.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10870 
10871 // Shift Right Arithmetic Immediate
// long >> constant (arithmetic).  Shift count masked to 6 bits (& 0x3f).
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10886 
10887 // BEGIN This section of the file is automatically generated. Do not edit --------------
10888 
// NOTE(review): these rules are in the auto-generated section — any comment
// changes should be migrated to the generator script.
// dst = ~src1 (XorL with -1), implemented as EON with the zero register.
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// 32-bit variant: dst = ~src1 via EONW with the zero register.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10921 
// Auto-generated fused not-variants: each rule absorbs an XOR-with-minus-one
// into the AArch64 inverted-operand logical instruction (BIC/ORN/EON).
// dst = src1 & ~src2  -> bicw
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 & ~src2  (64-bit) -> bic
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2  -> ornw
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = src1 | ~src2  (64-bit) -> orn
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = -1 ^ (src2 ^ src1) == ~(src1 ^ src2)  -> eonw
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// dst = ~(src1 ^ src2)  (64-bit) -> eon
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
11023 
// Auto-generated: AND with an inverted, shifted operand fused into a single
// BIC(W) with shifted register.  Shift counts masked to 5 (int) / 6 (long) bits.
// dst = src1 & ~(src2 >>> src3)
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >>> src3)  (64-bit)
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3)
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 >> src3)  (64-bit)
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3)
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & ~(src2 << src3)  (64-bit)
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11131 
// Auto-generated: XOR-NOT with a shifted operand fused into EON(W) with
// shifted register: dst = ~(src1 ^ (src2 shift src3)).
// dst = ~(src1 ^ (src2 >>> src3))
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >>> src3))  (64-bit)
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3))
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 >> src3))  (64-bit)
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3))
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = ~(src1 ^ (src2 << src3))  (64-bit)
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11239 
// Auto-generated: OR with an inverted, shifted operand fused into ORN(W)
// with shifted register: dst = src1 | ~(src2 shift src3).
// dst = src1 | ~(src2 >>> src3)
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >>> src3)  (64-bit)
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3)
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 >> src3)  (64-bit)
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3)
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | ~(src2 << src3)  (64-bit)
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11347 
// Auto-generated: AND with a shifted second operand fused into a single
// AND(W) with shifted register ("andr" is the assembler method name).
// dst = src1 & (src2 >>> src3)
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >>> src3)  (64-bit)
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3)
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 >> src3)  (64-bit)
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3)
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 & (src2 << src3)  (64-bit)
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11461 
// Auto-generated: XOR with a shifted second operand fused into EOR(W)
// with shifted register.
// dst = src1 ^ (src2 >>> src3)
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >>> src3)  (64-bit)
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3)
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 >> src3)  (64-bit)
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3)
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 ^ (src2 << src3)  (64-bit)
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11575 
// Auto-generated: OR with a shifted second operand fused into ORR(W)
// with shifted register.
// dst = src1 | (src2 >>> src3)
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >>> src3)  (64-bit)
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// dst = src1 | (src2 >> src3)
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11632 
11633 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
11634                          iRegL src1, iRegL src2,
11635                          immI src3, rFlagsReg cr) %{
11636   match(Set dst (OrL src1 (RShiftL src2 src3)));
11637 
11638   ins_cost(1.9 * INSN_COST);
11639   format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
11640 
11641   ins_encode %{
11642     __ orr(as_Register($dst$$reg),
11643               as_Register($src1$$reg),
11644               as_Register($src2$$reg),
11645               Assembler::ASR,
11646               $src3$$constant & 0x3f);
11647   %}
11648 
11649   ins_pipe(ialu_reg_reg_shift);
11650 %}
11651 
11652 instruct OrI_reg_LShift_reg(iRegINoSp dst,
11653                          iRegIorL2I src1, iRegIorL2I src2,
11654                          immI src3, rFlagsReg cr) %{
11655   match(Set dst (OrI src1 (LShiftI src2 src3)));
11656 
11657   ins_cost(1.9 * INSN_COST);
11658   format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
11659 
11660   ins_encode %{
11661     __ orrw(as_Register($dst$$reg),
11662               as_Register($src1$$reg),
11663               as_Register($src2$$reg),
11664               Assembler::LSL,
11665               $src3$$constant & 0x1f);
11666   %}
11667 
11668   ins_pipe(ialu_reg_reg_shift);
11669 %}
11670 
11671 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
11672                          iRegL src1, iRegL src2,
11673                          immI src3, rFlagsReg cr) %{
11674   match(Set dst (OrL src1 (LShiftL src2 src3)));
11675 
11676   ins_cost(1.9 * INSN_COST);
11677   format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
11678 
11679   ins_encode %{
11680     __ orr(as_Register($dst$$reg),
11681               as_Register($src1$$reg),
11682               as_Register($src2$$reg),
11683               Assembler::LSL,
11684               $src3$$constant & 0x3f);
11685   %}
11686 
11687   ins_pipe(ialu_reg_reg_shift);
11688 %}
11689 
// Add with the second operand shifted by an immediate (LSR/ASR/LSL),
// folded into a single ADDW/ADD with a shifted-register operand.  The
// immediate shift count is masked to the operand width (0x1f / 0x3f)
// before encoding.  32-bit and 64-bit variants alternate below.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11803 
// Subtract with the second operand shifted by an immediate
// (LSR/ASR/LSL), folded into a single SUBW/SUB with a shifted-register
// operand.  The immediate shift count is masked to the operand width
// (0x1f / 0x3f) before encoding.  32-bit and 64-bit variants alternate.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11917 
11918 
11919 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// The pair is folded into a single signed bitfield move; the r/s
// arguments below are derived from the two shift counts (presumably
// sbfm's immr/imms encoding — see the assembler's sbfm definition).
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: same folding with 31/& 31 in place of 63.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned (logical right-shift) counterpart of sbfmL.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned counterpart of sbfmwI.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12011 // Bitfield extract with shift & mask
12012 
// Unsigned bitfield extract: (src >>> rshift) & mask, where mask is
// 2^k - 1 (guaranteed by immI_bitmask), emitted as a single ubfxw.
// Fix: include $rshift in the debug format — the instruction takes both
// the lsb (rshift) and the field width derived from the mask.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // mask + 1 is a power of two, so width = log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit unsigned bitfield extract: (src >>> rshift) & mask, mask being
// 2^k - 1 (guaranteed by immL_bitmask), emitted as a single ubfx.
// Fix: include $rshift in the debug format — the instruction takes both
// the lsb (rshift) and the field width derived from the mask.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // mask + 1 is a power of two, so width = log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12043 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// ubfx zero-fills the upper bits of the 64-bit destination, so the
// ConvI2L comes for free.
// Fix: include $rshift in the debug format — the instruction takes both
// the lsb (rshift) and the field width derived from the mask.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant;
    long mask = $mask$$constant;
    // mask + 1 is a power of two, so width = log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12061 
// Rotations

// Rotate via EXTR: when the two shift counts sum to 64 (checked by the
// predicate), (x << lshift) | (y >>> rshift) is a double-register
// extract; with src1 == src2 this is a rotate right by rshift.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12078 
// 32-bit variant of extrOrL: shift counts must sum to 32 (predicate).
// Fix: the debug format said "extr" while the encoding emits extrw;
// print the 32-bit mnemonic, consistent with the eorw/orrw/addw rules.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12093 
// Same folding as extrOrL but matching AddL: with disjoint bit ranges
// (shift counts summing to 64, checked by the predicate) the add is
// equivalent to the or and can use EXTR.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12108 
// 32-bit variant of extrAddL: shift counts must sum to 32 (predicate).
// Fix: the debug format said "extr" while the encoding emits extrw;
// print the 32-bit mnemonic, consistent with the eorw/orrw/addw rules.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12123 
12124 
// rol expander
// No match rule: used only via expand.  rol by n == ror by (-n mod
// width), so negate the shift into rscratch1 and use the variable
// rotate-right instruction.

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander
// 32-bit variant of rolL_rReg (rorvw instead of rorv).

instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12156 
// Match the variable-rotate-left idiom
// (x << s) | (x >>> (64 - s)) and expand to the rol expander above.
instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

// As above with constant 0: (x << s) | (x >>> (0 - s)) — the shift is
// taken mod 64 at run time, so 0 behaves like 64 here.
instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}
12174 
// Match the 32-bit variable-rotate-left idiom
// (x << s) | (x >>> (32 - s)) and expand to the 32-bit rol expander.
// Fix: the rule declared 64-bit operands (iRegLNoSp/iRegL) for an OrI
// match — so it could never be selected — and expanded to the 64-bit
// rolL_rReg, which would rotate 64 bits.  Use the 32-bit operand
// classes and rolI_rReg (cf. JDK-8154537).
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12183 
// As rolI_rReg_Var_C_32 with constant 0: the shift amount is taken
// mod 32 at run time, so 0 behaves like 32 in the (SubI c0 shift) term.
// Fix: the rule declared 64-bit operands (iRegLNoSp/iRegL) for an OrI
// match — so it could never be selected — and expanded to the 64-bit
// rolL_rReg.  Use the 32-bit operand classes and rolI_rReg
// (cf. JDK-8154537).
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
12192 
// ror expander
// No match rule: used only via expand.  AArch64 has a variable
// rotate-right (rorv), so this is a single instruction.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander
// 32-bit variant of rorL_rReg (rorvw instead of rorv).

instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12222 
// Match the variable-rotate-right idiom
// (x >>> s) | (x << (64 - s)) and expand to the ror expander above.
instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

// As above with constant 0: the shift is taken mod 64 at run time,
// so 0 behaves like 64 here.
instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}
12240 
// Match the 32-bit variable-rotate-right idiom
// (x >>> s) | (x << (32 - s)) and expand to the 32-bit ror expander.
// Fix: the rule declared 64-bit operands (iRegLNoSp/iRegL) for an OrI
// match — so it could never be selected — and expanded to the 64-bit
// rorL_rReg, which would rotate 64 bits.  Use the 32-bit operand
// classes and rorI_rReg (cf. JDK-8154537).
instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12249 
// As rorI_rReg_Var_C_32 with constant 0: the shift amount is taken
// mod 32 at run time, so 0 behaves like 32 in the (SubI c0 shift) term.
// Fix: the rule declared 64-bit operands (iRegLNoSp/iRegL) for an OrI
// match — so it could never be selected — and expanded to the 64-bit
// rorL_rReg.  Use the 32-bit operand classes and rorI_rReg
// (cf. JDK-8154537).
instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
12258 
// Add/subtract (extended)
// ConvI2L of the second operand is folded into the add/sub as an
// sxtw extended-register operand.

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Subtract counterpart of AddExtI.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
12286 
12287 
// Add with a sign/zero-extended second operand.  The shift-pair idiom
// (x << k) >> k (RShift for sign-extension, URShift for zero-extension)
// with matching k (16/24/32/48/56, fixed by the imm operand classes) is
// folded into an extended-register add (ext::sxth/sxtb/sxtw/uxtb).
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit variants: shift counts are 64 - field width (48/32/56).
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12378 
12379 
// Add with a masked (zero-extended) second operand.  An And with
// 0xff / 0xffff / 0xffffffff (fixed by the imm mask operand classes) is
// folded into an extended-register add (ext::uxtb/uxth/uxtw).
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12444 
// Subtract counterparts of the masked (zero-extended) add rules above:
// And with 0xff / 0xffff / 0xffffffff folded into an extended-register
// subtract (ext::uxtb/uxth/uxtw).
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12509 
12510 // END This section of the file is automatically generated. Do not edit --------------
12511 
12512 // ============================================================================
12513 // Floating Point Arithmetic Instructions
12514 
// Single-precision FP add: fadds dst, src1, src2
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP add: faddd dst, src1, src2
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP subtract: fsubs dst, src1, src2
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP subtract: fsubd dst, src1, src2
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Single-precision FP multiply: fmuls dst, src1, src2
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double-precision FP multiply: fmuld dst, src1, src2
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
12604 
12605 // We cannot use these fused mul w add/sub ops because they don't
12606 // produce the same result as the equivalent separated ops
12607 // (essentially they don't round the intermediate result). that's a
// shame. leaving them here in case we can identify cases where it is
12609 // legitimate to use them
12610 
12611 
12612 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12613 //   match(Set dst (AddF (MulF src1 src2) src3));
12614 
12615 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
12616 
12617 //   ins_encode %{
12618 //     __ fmadds(as_FloatRegister($dst$$reg),
12619 //              as_FloatRegister($src1$$reg),
12620 //              as_FloatRegister($src2$$reg),
12621 //              as_FloatRegister($src3$$reg));
12622 //   %}
12623 
12624 //   ins_pipe(pipe_class_default);
12625 // %}
12626 
12627 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12628 //   match(Set dst (AddD (MulD src1 src2) src3));
12629 
12630 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
12631 
12632 //   ins_encode %{
12633 //     __ fmaddd(as_FloatRegister($dst$$reg),
12634 //              as_FloatRegister($src1$$reg),
12635 //              as_FloatRegister($src2$$reg),
12636 //              as_FloatRegister($src3$$reg));
12637 //   %}
12638 
12639 //   ins_pipe(pipe_class_default);
12640 // %}
12641 
12642 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12643 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
12644 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
12645 
12646 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
12647 
12648 //   ins_encode %{
12649 //     __ fmsubs(as_FloatRegister($dst$$reg),
12650 //               as_FloatRegister($src1$$reg),
12651 //               as_FloatRegister($src2$$reg),
12652 //              as_FloatRegister($src3$$reg));
12653 //   %}
12654 
12655 //   ins_pipe(pipe_class_default);
12656 // %}
12657 
12658 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12659 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
12660 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
12661 
12662 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
12663 
12664 //   ins_encode %{
12665 //     __ fmsubd(as_FloatRegister($dst$$reg),
12666 //               as_FloatRegister($src1$$reg),
12667 //               as_FloatRegister($src2$$reg),
12668 //               as_FloatRegister($src3$$reg));
12669 //   %}
12670 
12671 //   ins_pipe(pipe_class_default);
12672 // %}
12673 
12674 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
12675 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
12676 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
12677 
12678 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
12679 
12680 //   ins_encode %{
12681 //     __ fnmadds(as_FloatRegister($dst$$reg),
12682 //                as_FloatRegister($src1$$reg),
12683 //                as_FloatRegister($src2$$reg),
12684 //                as_FloatRegister($src3$$reg));
12685 //   %}
12686 
12687 //   ins_pipe(pipe_class_default);
12688 // %}
12689 
12690 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
12691 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
12692 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
12693 
12694 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
12695 
12696 //   ins_encode %{
12697 //     __ fnmaddd(as_FloatRegister($dst$$reg),
12698 //                as_FloatRegister($src1$$reg),
12699 //                as_FloatRegister($src2$$reg),
12700 //                as_FloatRegister($src3$$reg));
12701 //   %}
12702 
12703 //   ins_pipe(pipe_class_default);
12704 // %}
12705 
12706 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
12707 //   match(Set dst (SubF (MulF src1 src2) src3));
12708 
12709 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
12710 
12711 //   ins_encode %{
12712 //     __ fnmsubs(as_FloatRegister($dst$$reg),
12713 //                as_FloatRegister($src1$$reg),
12714 //                as_FloatRegister($src2$$reg),
12715 //                as_FloatRegister($src3$$reg));
12716 //   %}
12717 
12718 //   ins_pipe(pipe_class_default);
12719 // %}
12720 
12721 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
12722 //   match(Set dst (SubD (MulD src1 src2) src3));
12723 
12724 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
12725 
12726 //   ins_encode %{
12727 //   // n.b. insn name should be fnmsubd
12728 //     __ fnmsub(as_FloatRegister($dst$$reg),
12729 //                as_FloatRegister($src1$$reg),
12730 //                as_FloatRegister($src2$$reg),
12731 //                as_FloatRegister($src3$$reg));
12732 //   %}
12733 
12734 //   ins_pipe(pipe_class_default);
12735 // %}
12736 
12737 
// Single-precision FP divide: fdivs dst, src1, src2
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide: fdivd dst, src1, src2
// (higher cost than divF reflects the longer double-precision divide latency)
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12767 
// Single-precision FP negate: fnegs dst, src
// Fix: the format string previously said "fneg", which is not the emitted
// opcode; it now names fnegs, consistent with negD_reg_reg's "fnegd".
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegs  $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
12781 
// Double-precision FP negate: fnegd dst, src
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12795 
// Single-precision FP absolute value: fabss dst, src
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision FP absolute value: fabsd dst, src
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
12821 
// Double-precision FP square root: fsqrtd dst, src
// Fix: this rule was scheduled in the single-precision divide pipe
// (fp_div_s) while sqrtF_reg used fp_div_d -- the two pipeline classes
// were swapped.  Use the double-precision pipe here.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_d);
%}
12834 
// Single-precision FP square root.  Matches the shape C2 produces for
// (float)Math.sqrt((double)f): ConvD2F(SqrtD(ConvF2D src)), which is
// exactly representable as a single fsqrts.
// Fix: this rule was scheduled in the double-precision divide pipe
// (fp_div_d) while sqrtD_reg used fp_div_s -- the two pipeline classes
// were swapped.  Use the single-precision pipe here.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_div_s);
%}
12847 
12848 // ============================================================================
12849 // Logical Instructions
12850 
12851 // Integer Logical Instructions
12852 
12853 // And Instructions
12854 
12855 
// 32-bit bitwise AND, register-register: andw dst, src1, src2
// NOTE(review): the cr operand is declared but no effect() clause is given;
// presumably present for historical/matching reasons -- confirm flags are
// not actually clobbered by this non-flag-setting andw.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12870 
// 32-bit bitwise AND with a logical immediate: andw dst, src1, #imm
// Fix: the format string previously said "andsw" (the flag-setting form),
// but the encoding emits the non-flag-setting andw; the format now matches
// the emitted instruction and the sibling andI_reg_reg rule.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12885 
12886 // Or Instructions
12887 
// 32-bit bitwise OR, register-register: orrw dst, src1, src2
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise OR with a logical immediate: orrw dst, src1, #imm
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 32-bit bitwise XOR, register-register: eorw dst, src1, src2
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit bitwise XOR with a logical immediate: eorw dst, src1, #imm
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
12949 
12950 // Long Logical Instructions
12951 // TODO
12952 
// 64-bit bitwise AND, register-register: and dst, src1, src2
// Fix: all six long logical rules below carried a "\t# int" comment in
// their format strings, copy-pasted from the 32-bit rules above; they now
// say "# long" to match the AndL/OrL/XorL ideal ops they implement.
// (The encoder method is andr because "and" is reserved in C++.)
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise AND with a logical immediate: and dst, src1, #imm
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Or Instructions

// 64-bit bitwise OR, register-register: orr dst, src1, src2
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise OR with a logical immediate: orr dst, src1, #imm
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// 64-bit bitwise XOR, register-register: eor dst, src1, src2
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit bitwise XOR with a logical immediate: eor dst, src1, #imm
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
13046 
// Sign-extending int-to-long conversion: sbfm (sxtw) over bits 0..31.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// Zero-extending int-to-long: (ConvI2L src) & 0xffffffff folded to a
// single ubfm over bits 0..31.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Long-to-int: a 32-bit register move truncates to the low word.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
13085 
// Int-to-boolean: dst = (src != 0) ? 1 : 0, via compare against zr then
// conditional set.  Clobbers the flags (KILL cr).
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Pointer-to-boolean: dst = (src != NULL) ? 1 : 0, 64-bit compare variant
// of convI2B above.  Clobbers the flags (KILL cr).
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
13121 
// Double-to-float narrowing conversion.
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// Float-to-double widening conversion.
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Float-to-int: fcvtzsw converts toward zero into a 32-bit GP register.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// Float-to-long: fcvtzs converts toward zero into a 64-bit GP register.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// Int-to-float: signed 32-bit scalar convert.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// Long-to-float: signed 64-bit scalar convert.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}

// Double-to-int: fcvtzdw converts toward zero into a 32-bit GP register.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// Double-to-long: fcvtzd converts toward zero into a 64-bit GP register.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// Int-to-double: signed 32-bit scalar convert.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// Long-to-double: signed 64-bit scalar convert.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
13251 
13252 // stack <-> reg and reg <-> reg shuffles with no conversion
13253 
// Bit-for-bit float-to-int move via a stack slot: 32-bit GP load.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Bit-for-bit int-to-float move via a stack slot: 32-bit FP load.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit double-to-long move via a stack slot: 64-bit GP load.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Bit-for-bit long-to-double move via a stack slot: 64-bit FP load.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit float-to-int move into a stack slot: 32-bit FP store.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit int-to-float move into a stack slot: 32-bit GP store.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13361 
// Bit-for-bit double-to-long move into a stack slot: 64-bit FP store.
// Fix: the format string previously printed "strd $dst, $src", reversing
// the operands relative to the encoding (which stores $src to the stack
// slot $dst) and to the sibling MoveF2I_reg_stack/MoveL2D_reg_stack rules.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
13379 
// Bit-for-bit long-to-double move into a stack slot: 64-bit GP store.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
13397 
// Bit-for-bit float-to-int move, direct register form: fmov FP -> GP.
// NOTE(review): these reg-reg fmov rules are scheduled in
// pipe_class_memory; presumably a conservative choice -- confirm a more
// specific pipe class was not intended.
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit int-to-float move, direct register form: fmov GP -> FP.
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit double-to-long move, direct register form: fmov FP -> GP.
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(pipe_class_memory);

%}

// Bit-for-bit long-to-double move, direct register form: fmov GP -> FP.
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(pipe_class_memory);

%}
13469 
13470 // ============================================================================
13471 // clearing of an array
13472 
// Zero a freshly-allocated array: count in r11, base address in r10
// (both destroyed -- USE_KILL).  Emission is delegated to the
// aarch64_enc_clear_array_reg_reg encoding class defined elsewhere.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode(aarch64_enc_clear_array_reg_reg(cnt, base));

  ins_pipe(pipe_class_memory);
%}
13485 
13486 // ============================================================================
13487 // Overflow Math Instructions
13488 
// Overflow check for int add: cmnw (adds to zr) sets V on signed overflow.
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int add with an add/sub immediate.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long add: cmn sets V on signed overflow.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long add with an add/sub immediate.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for int subtract: cmpw sets V on signed overflow.
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for int subtract with an add/sub immediate.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long subtract: cmp sets V on signed overflow.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// Overflow check for long subtract with an add/sub immediate.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
13592 
// Overflow check for int negate (0 - op1): cmpw zr, op1.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Overflow check for long negate (0 - op1): cmp zr, op1.
// NOTE(review): the zero operand is declared immI0 although this rule
// matches a long subtract -- presumably the matcher only needs the zero
// constant itself; confirm immL0 was not intended.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
13618 
// Int multiply overflow check: widen with smull to a 64-bit product, then
// verify the product equals its own 32-bit sign extension (subs ... sxtw).
// The movw/cselw/cmpw tail converts that EQ/NE answer into the V flag
// (0x80000000 - 1 sets VS) so a normal cmpOp overflow test can consume cr.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
13639 
// Fused int-multiply-overflow test and branch: skips the flag-materializing
// tail of overflowMulI_reg and branches directly on the smull/subs result.
// overflow (VS) maps to NE of the sign-extension compare, no_overflow to EQ.
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13661 
// Long multiply overflow check.  mul produces result bits 0..63 and smulh
// bits 64..127; the 128-bit product fits in a signed 64-bit long iff the
// high half equals the sign extension of the low half, i.e.
// rscratch2 == (rscratch1 ASR #63).  The movw/cselw/cmpw tail converts the
// EQ/NE answer into the V flag (0x80000000 - 1 sets VS) for the consuming
// cmpOp overflow test.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    // Shift must be 63 (not 31): ASR #31 replicates bit 31 only and would
    // flag legal products such as (1L << 31) * (1L << 31) as overflowing.
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}
13684 
// Fused long-multiply-overflow test and branch: same mul/smulh check as
// overflowMulL_reg, but branches directly on the high-half comparison
// instead of materializing the V flag.  overflow (VS) maps to NE,
// no_overflow to EQ.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    // Shift must be 63 (not 31): the high half has to match the sign
    // extension of the full 64-bit low half, i.e. rscratch1 >> 63.
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
13708 
13709 // ============================================================================
13710 // Compare Instructions
13711 
13712 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
13713 %{
13714   match(Set cr (CmpI op1 op2));
13715 
13716   effect(DEF cr, USE op1, USE op2);
13717 
13718   ins_cost(INSN_COST);
13719   format %{ "cmpw  $op1, $op2" %}
13720 
13721   ins_encode(aarch64_enc_cmpw(op1, op2));
13722 
13723   ins_pipe(icmp_reg_reg);
13724 %}
13725 
13726 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
13727 %{
13728   match(Set cr (CmpI op1 zero));
13729 
13730   effect(DEF cr, USE op1);
13731 
13732   ins_cost(INSN_COST);
13733   format %{ "cmpw $op1, 0" %}
13734 
13735   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
13736 
13737   ins_pipe(icmp_reg_imm);
13738 %}
13739 
13740 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
13741 %{
13742   match(Set cr (CmpI op1 op2));
13743 
13744   effect(DEF cr, USE op1);
13745 
13746   ins_cost(INSN_COST);
13747   format %{ "cmpw  $op1, $op2" %}
13748 
13749   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
13750 
13751   ins_pipe(icmp_reg_imm);
13752 %}
13753 
13754 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
13755 %{
13756   match(Set cr (CmpI op1 op2));
13757 
13758   effect(DEF cr, USE op1);
13759 
13760   ins_cost(INSN_COST * 2);
13761   format %{ "cmpw  $op1, $op2" %}
13762 
13763   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
13764 
13765   ins_pipe(icmp_reg_imm);
13766 %}
13767 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

// Unsigned int compare, register-register; the cmpw is identical to the
// signed form -- only the consuming cmpOpU interprets the flags unsigned.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (may need the
// constant materialized first, hence INSN_COST * 2).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13827 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
// NOTE(review): the format text says "tst" but the encoding is the add/sub
// immediate compare (cmp $op1, #0); also the zero operand is immI0 rather
// than immL0 for this long compare -- verify both against upstream.
instruct compL_reg_immI0(rFlagsReg cr, iRegL op1, immI0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant may need
// materializing, hence INSN_COST * 2).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
13883 
// Pointer compare, register-register; pointers compare unsigned
// (rFlagsRegU).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop compare, register-register (32-bit, unsigned).
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null test (compare against the null pointer constant).
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null test.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
13939 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// Float compare, register-register (fcmps sets NZCV).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13958 
// Float compare against constant 0.0 using the fcmp-with-zero form.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    // Use the standard C++ literal 0.0: the Java-style 'D' suffix
    // (0.0D) is not valid C++ and breaks non-GCC compilers.
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// FROM HERE

// Double compare, register-register (fcmpd sets NZCV).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
13987 
// Double compare against constant 0.0 using the fcmp-with-zero form.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    // Use the standard C++ literal 0.0: the Java-style 'D' suffix
    // (0.0D) is not valid C++ and breaks non-GCC compilers.
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
14001 
// Three-way float compare (Java Float.compare semantics): dst is -1 if
// src1 < src2 or unordered, 0 if equal, +1 if greater.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Balanced the parenthesis in the debug format text (was "...eq\n\t").
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE(review): nothing branches to 'done'; the bind emits no code.
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}
14029 
// Three-way double compare (Java Double.compare semantics): dst is -1 if
// src1 < src2 or unordered, 0 if equal, +1 if greater.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Balanced the parenthesis in the debug format text (was "...eq\n\t").
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE(review): nothing branches to 'done'; the bind emits no code.
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14056 
// Three-way float compare against constant 0.0 (same -1/0/+1 contract as
// compF3_reg_reg, using the fcmp-with-zero form).
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Balanced the parenthesis in the debug format text (was "...eq\n\t").
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // 0.0 (not 0.0D): the 'D' suffix is Java, not valid C++.
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE(review): nothing branches to 'done'; the bind emits no code.
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}
14083 
// Three-way double compare against constant 0.0 (same -1/0/+1 contract as
// compD3_reg_reg, using the fcmp-with-zero form).
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  // Balanced the parenthesis in the debug format text (was "...eq\n\t").
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // 0.0 (not 0.0D): the 'D' suffix is Java, not valid C++.
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    // NOTE(review): nothing branches to 'done'; the bind emits no code.
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
14109 
// CmpLTMask: dst = (p < q) ? -1 : 0.  csetw materializes 0/1 from the
// compare, then subw zr - dst turns 1 into -1 (all ones).
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// CmpLTMask against zero: an arithmetic shift right by 31 smears the sign
// bit, giving -1 for negative src and 0 otherwise in one instruction.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14146 
14147 // ============================================================================
14148 // Max and Min
14149 
14150 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
14151 %{
14152   match(Set dst (MinI src1 src2));
14153 
14154   effect(DEF dst, USE src1, USE src2, KILL cr);
14155   size(8);
14156 
14157   ins_cost(INSN_COST * 3);
14158   format %{
14159     "cmpw $src1 $src2\t signed int\n\t"
14160     "cselw $dst, $src1, $src2 lt\t"
14161   %}
14162 
14163   ins_encode %{
14164     __ cmpw(as_Register($src1$$reg),
14165             as_Register($src2$$reg));
14166     __ cselw(as_Register($dst$$reg),
14167              as_Register($src1$$reg),
14168              as_Register($src2$$reg),
14169              Assembler::LT);
14170   %}
14171 
14172   ins_pipe(ialu_reg_reg);
14173 %}
14174 // FROM HERE
14175 
14176 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
14177 %{
14178   match(Set dst (MaxI src1 src2));
14179 
14180   effect(DEF dst, USE src1, USE src2, KILL cr);
14181   size(8);
14182 
14183   ins_cost(INSN_COST * 3);
14184   format %{
14185     "cmpw $src1 $src2\t signed int\n\t"
14186     "cselw $dst, $src1, $src2 gt\t"
14187   %}
14188 
14189   ins_encode %{
14190     __ cmpw(as_Register($src1$$reg),
14191             as_Register($src2$$reg));
14192     __ cselw(as_Register($dst$$reg),
14193              as_Register($src1$$reg),
14194              as_Register($src2$$reg),
14195              Assembler::GT);
14196   %}
14197 
14198   ins_pipe(ialu_reg_reg);
14199 %}
14200 
14201 // ============================================================================
14202 // Branch Instructions
14203 
14204 // Direct Branch.
14205 instruct branch(label lbl)
14206 %{
14207   match(Goto);
14208 
14209   effect(USE lbl);
14210 
14211   ins_cost(BRANCH_COST);
14212   format %{ "b  $lbl" %}
14213 
14214   ins_encode(aarch64_enc_b(lbl));
14215 
14216   ins_pipe(pipe_branch);
14217 %}
14218 
14219 // Conditional Near Branch
14220 instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
14221 %{
14222   // Same match rule as `branchConFar'.
14223   match(If cmp cr);
14224 
14225   effect(USE lbl);
14226 
14227   ins_cost(BRANCH_COST);
14228   // If set to 1 this indicates that the current instruction is a
14229   // short variant of a long branch. This avoids using this
14230   // instruction in first-pass matching. It will then only be used in
14231   // the `Shorten_branches' pass.
14232   // ins_short_branch(1);
14233   format %{ "b$cmp  $lbl" %}
14234 
14235   ins_encode(aarch64_enc_br_con(cmp, lbl));
14236 
14237   ins_pipe(pipe_branch_cond);
14238 %}
14239 
14240 // Conditional Near Branch Unsigned
14241 instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
14242 %{
14243   // Same match rule as `branchConFar'.
14244   match(If cmp cr);
14245 
14246   effect(USE lbl);
14247 
14248   ins_cost(BRANCH_COST);
14249   // If set to 1 this indicates that the current instruction is a
14250   // short variant of a long branch. This avoids using this
14251   // instruction in first-pass matching. It will then only be used in
14252   // the `Shorten_branches' pass.
14253   // ins_short_branch(1);
14254   format %{ "b$cmp  $lbl\t# unsigned" %}
14255 
14256   ins_encode(aarch64_enc_br_conU(cmp, lbl));
14257 
14258   ins_pipe(pipe_branch_cond);
14259 %}
14260 
// Make use of CBZ and CBNZ.  These instructions, as well as being
// shorter than (cmp; branch), have the additional benefit of not
// killing the flags.

// Int compare-against-zero and branch: predicate restricts to eq/ne so the
// compare+branch pair can collapse to a single cbzw/cbnzw.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long compare-against-zero and branch (64-bit cbz/cbnz).
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-test and branch (64-bit cbz/cbnz).
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null-test of a compressed oop before decode: a narrow oop is null iff its
// 32-bit encoding is zero, so the DecodeN can be skipped and cbzw/cbnzw
// applied to the narrow register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14340 
// Test bit and Branch

// Patterns for short (< 32KiB) variants

// Sign test of a long via tbz/tbnz on bit 63: lt maps to "bit set" (NE
// here means tbnz) and ge to "bit clear" (tbz).
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Sign test of an int via tbz/tbnz on bit 31.
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of a long: (op1 & (1 << bit)) ==/!= 0 becomes one
// tbz/tbnz; the predicate requires the mask to be a power of two.
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Single-bit test of an int (power-of-two mask) via tbz/tbnz.
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
14417 
// And far variants
// Same patterns as above but without ins_short_branch(1); tbr is called
// with far=true so targets beyond the tbz/tbnz range are reachable.

// Far sign test of a long (bit 63).
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far sign test of an int (bit 31).
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of a long (power-of-two mask).
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far single-bit test of an int (power-of-two mask).
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
14488 
// Test bits

// (op1 & imm) compared to 0: emit tst with the logical immediate; the
// predicate guards that the constant fits the 64-bit bitmask encoding.
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit variant: tstw with a 32-bit logical immediate.
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}

// (op1 & op2) compared to 0, register mask form.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit variant of the register-mask test.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
14538 

// Conditional Far Branch
// Conditional Far Branch Unsigned
// TODO: fixme

// counted loop end branch near
// Back-branch of a counted loop on the signed condition in cr.
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}

// counted loop end branch near Unsigned
// Same as branchLoopEnd but consumes unsigned flags (cmpOpU/rFlagsRegU).
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
14577 
14578 // counted loop end branch far
14579 // counted loop end branch far unsigned
14580 // TODO: fixme
14581 
14582 // ============================================================================
14583 // inlined locking and unlocking
14584 
// Inline monitor-enter fast path.  Sets the flags to indicate success or
// failure (slow-path call needed); tmp and tmp2 are scratch registers
// clobbered by the lock encoding.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14599 
// Inline monitor-exit fast path; mirror of cmpFastLock.  Flags report
// whether the slow path must be taken; tmp and tmp2 are scratch.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
14612 
14613 
14614 // ============================================================================
14615 // Safepoint Instructions
14616 
14617 // TODO
14618 // provide a near and far version of this code
14619 
// Safepoint poll: load from the polling page into zr, so no architectural
// register is modified.  The poll_type relocation lets the VM recognize the
// faulting load when the page is protected for a safepoint.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
14632 
14633 
14634 // ============================================================================
14635 // Procedure Call/Return Instructions
14636 
14637 // Call Java Static Instruction
14638 
// Direct (statically bound) Java call followed by the standard call epilog.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14654 
14655 // TO HERE
14656 
14657 // Call Java Dynamic Instruction
// Dynamically dispatched (virtual/interface via inline cache) Java call,
// followed by the standard call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
14673 
14674 // Call Runtime Instruction
14675 
// Call from compiled Java code into the VM runtime (non-leaf).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14690 
14691 // Call Runtime Instruction
14692 
// Leaf runtime call (no safepoint/oop-map needed); same encoding as the
// non-leaf runtime call on this port.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14707 
14708 // Call Runtime Instruction
14709 
// Leaf runtime call that does not touch floating-point state; encoding is
// shared with the other runtime-call forms on this port.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
14724 
14725 // Tail Call; Jump from runtime stub to Java code.
14726 // Also known as an 'interprocedural jump'.
14727 // Target of jump will eventually return to caller.
14728 // TailJump below removes the return address.
// Indirect tail call: jump (not call) to jump_target; method_oop is kept
// live in the inline-cache register for the callee.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
14741 
// Indirect tail jump used for exception forwarding: ex_oop (pinned to r0)
// carries the exception oop to the target.
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
14754 
14755 // Create exception oop: created by stack-crawling runtime code.
14756 // Created exception is now available to this handler, and is setup
14757 // just prior to jumping to this handler. No code emitted.
14758 // TODO check
14759 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Placeholder for the exception oop materialized by the runtime before
// jumping into the handler: the oop is already in r0, so size(0) and an
// empty encoding — no instructions are emitted.
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14772 
14773 // Rethrow exception: The exception oop will come in the first
14774 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: exception oop arrives in the first argument register; branch
// (not call) to the shared rethrow stub.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
14785 
14786 
14787 // Return Instruction
14788 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr, so this is a bare ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
14799 
14800 // Die now.
// Halt: emit a breakpoint (brk #999) to trap execution of code paths the
// compiler has proven unreachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
14815 
14816 // ============================================================================
14817 // Partial Subtype Check
14818 //
14819 // superklass array for an instance of the superklass.  Set a hidden
14820 // internal cache on a hit (cache is checked with exposed code in
14821 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
14822 // encoding ALSO sets flags.
14823 
// Partial subtype check: scan super's secondary-supers array for sub.
// Result register is zeroed on a hit (opcode 0x1 selects that behavior);
// flags are set as a side effect, temp is clobbered.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
14838 
// Flags-only form of the partial subtype check: the CmpP-against-zero of the
// check result is folded in, so only the condition codes are produced
// (opcode 0x0: result register is not zeroed on hit, but is still clobbered).
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
14853 
// String.compareTo intrinsic (non-compact strings only).  The incoming
// counts are byte counts; they are halved (asrw #1) to char counts before
// calling the MacroAssembler stub.  All fixed argument registers are
// USE_KILL because the stub consumes them.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt1$$Register, $cnt1$$Register, 1);
    __ asrw($cnt2$$Register, $cnt2$$Register, 1);
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14872 
// String.indexOf intrinsic, variable-length needle.  The -1 literal passed
// as icnt2 tells the stub that the needle length is not a compile-time
// constant and must be taken from cnt2 (contrast string_indexof_con below).
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14891 
// String.indexOf intrinsic for a constant needle length <= 4 (immI_le_4):
// the length is baked in as icnt2 and zr is passed in place of the cnt2
// register, letting the stub specialize the short-needle search.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
                 iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14912 
// String.equals intrinsic (non-compact strings).  As in string_compare, the
// incoming count is a byte count and is halved to a char count first.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(!CompactStrings);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt$$Register, $cnt$$Register, 1);
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14930 
// Arrays.equals intrinsic for char[] (predicate restricts to the UU, i.e.
// char/char, encoding).  Delegates to the char_arrays_equals stub.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
14945 
14946 // encode char[] to byte[] in ISO_8859_1
// ISO-8859-1 encoding intrinsic: encode char[] src into byte[] dst using the
// SIMD stub; V0-V3 are clobbered as vector scratch registers.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
14964 
14965 // ============================================================================
14966 // This name is KNOWN by the ADLC and cannot be changed.
14967 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
14968 // for this guy.
// ThreadLocal: the dedicated thread register already holds the current
// thread, so this instruct is zero-size and emits nothing.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
14983 
14984 // ====================VECTOR INSTRUCTIONS=====================================
14985 
14986 // Load vector (32 bits)
// Load a 32-bit vector into the low half of a D register (ldr Sn).
instruct loadV4(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
14996 
14997 // Load vector (64 bits)
// Load a 64-bit vector into a D register (ldr Dn).
instruct loadV8(vecD dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
15007 
15008 // Load Vector (128 bits)
// Load a 128-bit vector into a Q register (ldr Qn).
instruct loadV16(vecX dst, vmem mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
15018 
15019 // Store Vector (32 bits)
// Store the low 32 bits of a vector register (str Sn).
instruct storeV4(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15029 
15030 // Store Vector (64 bits)
// Store a 64-bit vector register (str Dn).
instruct storeV8(vecD src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
15040 
15041 // Store Vector (128 bits)
// Store a 128-bit vector register (str Qn).
instruct storeV16(vecX src, vmem mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
15051 
// Splat a GPR byte across a 64-bit vector (dup T8B); also covers 4-byte
// vectors, where only the low half of the result is used.
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15064 
// Splat a GPR byte across a 128-bit vector (dup T16B).
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15076 
// Splat an immediate byte across a 64-bit vector; the constant is masked
// to 8 bits before emitting the vector move-immediate.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15089 
// Splat an immediate byte across a 128-bit vector (masked to 8 bits).
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15101 
// Splat a GPR halfword across a 64-bit vector (dup T4H); also covers
// 2-element short vectors.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15114 
// Splat a GPR halfword across a 128-bit vector (dup T8H).
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15126 
// Splat an immediate halfword across a 64-bit vector (masked to 16 bits).
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15139 
// Splat an immediate halfword across a 128-bit vector (masked to 16 bits).
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15151 
// Splat a GPR word across a 64-bit vector (dup T2S).
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
15163 
// Splat a GPR word across a 128-bit vector (dup T4S).
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15175 
// Splat an immediate word across a 64-bit vector.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
15187 
// Splat an immediate word across a 128-bit vector.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15199 
// Splat a GPR doubleword across a 128-bit vector (dup T2D).
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
15211 
// Zero a 128-bit vector by eor-ing it with itself (no dependency on the
// previous value of dst as far as the result is concerned).
// NOTE(review): despite the 2L name, this matches (ReplicateI zero) with an
// immI0 operand — presumably the matcher presents a zero long splat this
// way; confirm against the matcher rules before changing.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
15225 
// Splat a float register's S lane across a 64-bit vector (dup T2S).
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
15238 
// Splat a float register's S lane across a 128-bit vector (dup T4S).
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
15251 
// Splat a double register's D lane across a 128-bit vector (dup T2D).
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
15264 
15265 // ====================REDUCTION ARITHMETIC====================================
15266 
// Add-reduce a 2-lane int vector into a scalar: dst = src1 + lane0 + lane1.
// Each lane is moved to a GPR with umov and accumulated with addw.
instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "umov  $tmp2, $src2, S, 1\n\t"
            "addw  $dst, $src1, $tmp\n\t"
            "addw  $dst, $dst, $tmp2\t add reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15285 
// Add-reduce a 4-lane int vector: addv sums all lanes into lane 0 of a
// vector temp, the sum is moved to a GPR (umov), then src1 is added.
instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (AddReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2);
  format %{ "addv  $tmp, T4S, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "addw  $dst, $tmp2, $src1\t add reduction4i"
  %}
  ins_encode %{
    __ addv(as_FloatRegister($tmp$$reg), __ T4S,
            as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ addw($dst$$Register, $tmp2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15303 
// Multiply-reduce a 2-lane int vector into a scalar: dst = src1 * lane0 *
// lane1.  Lanes are extracted with umov and multiplied in GPRs; dst is TEMP
// because it is written before the final use of the second lane.
instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  // Note: no trailing "\n\t" on the last line — it would emit a dangling
  // continuation in the debug disassembly.
  format %{ "umov  $tmp, $src2, S, 0\n\t"
            "mul   $dst, $tmp, $src1\n\t"
            "umov  $tmp, $src2, S, 1\n\t"
            "mul   $dst, $tmp, $dst\t mul reduction2i"
  %}
  ins_encode %{
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15322 
// Multiply-reduce a 4-lane int vector: ins copies the high 64 bits of src2
// over the low half of tmp, mulv multiplies lanes pairwise (lane0*lane2,
// lane1*lane3), then the two partial products are extracted with umov and
// folded in with src1 using scalar muls.
instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
%{
  match(Set dst (MulReductionVI src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP tmp2, TEMP dst);
  // Note: no trailing "\n\t" on the last line — it would emit a dangling
  // continuation in the debug disassembly.
  format %{ "ins   $tmp, $src2, 0, 1\n\t"
            "mul   $tmp, $tmp, $src2\n\t"
            "umov  $tmp2, $tmp, S, 0\n\t"
            "mul   $dst, $tmp2, $src1\n\t"
            "umov  $tmp2, $tmp, S, 1\n\t"
            "mul   $dst, $tmp2, $dst\t mul reduction4i"
  %}
  ins_encode %{
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ mulv(as_FloatRegister($tmp$$reg), __ T2S,
           as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg));
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0);
    __ mul($dst$$Register, $tmp2$$Register, $src1$$Register);
    __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1);
    __ mul($dst$$Register, $tmp2$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
15347 
// Add-reduce a 2-lane float vector: dst = (src1 + lane0) + lane1, with
// lane1 brought to lane position 0 of tmp via ins before the second fadds.
instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\t add reduction2f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15367 
// Add-reduce a 4-lane float vector: lanes are accumulated strictly in order
// (src1 + lane0 + lane1 + lane2 + lane3), each lane moved to position 0 of
// tmp via ins — a sequential chain preserves Java's FP ordering semantics.
instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fadds $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fadds $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fadds $dst, $dst, $tmp\t add reduction4f"
  %}
  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15399 
// Multiply-reduce a 2-lane float vector: dst = (src1 * lane0) * lane1.
// Fix: the format trailer previously read "add reduction4f" (copy-paste
// from the add variant); it is a 2-lane mul reduction.
instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction2f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15419 
// Multiply-reduce a 4-lane float vector: lanes are multiplied strictly in
// order (src1 * lane0 * lane1 * lane2 * lane3) to preserve FP ordering.
// Fix: the format trailer previously read "add reduction4f"; it is a mul.
instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVF src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuls $dst, $src1, $src2\n\t"
            "ins   $tmp, S, $src2, 0, 1\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 2\n\t"
            "fmuls $dst, $dst, $tmp\n\t"
            "ins   $tmp, S, $src2, 0, 3\n\t"
            "fmuls $dst, $dst, $tmp\t mul reduction4f"
  %}
  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 2);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ S,
           as_FloatRegister($src2$$reg), 0, 3);
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15451 
// Add-reduce a 2-lane double vector: dst = (src1 + lane0) + lane1, with
// lane1 moved to lane 0 of tmp via ins before the second faddd.
instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (AddReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "faddd $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "faddd $dst, $dst, $tmp\t add reduction2d"
  %}
  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15471 
// Multiply-reduce a 2-lane double vector: dst = (src1 * lane0) * lane1.
// Fix: the format trailer previously read "add reduction2d" (copy-paste
// from the add variant); it is a mul reduction.
instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp)
%{
  match(Set dst (MulReductionVD src1 src2));
  ins_cost(INSN_COST);
  effect(TEMP tmp, TEMP dst);
  format %{ "fmuld $dst, $src1, $src2\n\t"
            "ins   $tmp, D, $src2, 0, 1\n\t"
            "fmuld $dst, $dst, $tmp\t mul reduction2d"
  %}
  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
    __ ins(as_FloatRegister($tmp$$reg), __ D,
           as_FloatRegister($src2$$reg), 0, 1);
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(pipe_class_default);
%}
15491 
15492 // ====================VECTOR ARITHMETIC=======================================
15493 
15494 // --------------------------------- ADD --------------------------------------
15495 
// Integer vector add, 8 x byte.  The length-4-or-8 predicate lets 4-byte
// vectors share this 64-bit (T8B) form.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector add, 16 x byte (full 128-bit register, T16B).
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector add, 4 x short (length-2 vectors also use this T4H form).
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector add, 8 x short (T8H).
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector add, 2 x int (T2S).
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector add, 4 x int (T4S).
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector add, 2 x long (T2D arrangement of the integer add).
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Floating-point vector add, 2 x float (T2S).
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Floating-point vector add, 4 x float (T4S).
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15623 
// Floating-point vector add, 2 x double (T2D).
// Fix: add the length()==2 predicate that every sibling 2D rule in this
// file carries (vsub2D, vmul2D, vdiv2D, vabs2D, vneg2D); this rule was the
// only one matching its ideal node unconditionally.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15636 
15637 // --------------------------------- SUB --------------------------------------
15638 
// Integer vector subtract, 8 x byte (length-4 vectors share this T8B form).
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector subtract, 16 x byte (T16B).
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector subtract, 4 x short (length-2 vectors share this T4H form).
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector subtract, 8 x short (T8H).
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector subtract, 2 x int (T2S).
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}

// Integer vector subtract, 4 x int (T4S).
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Integer vector subtract, 2 x long (T2D).
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}

// Floating-point vector subtract, 2 x float (T2S).
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}

// Floating-point vector subtract, 4 x float (T4S).
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}

// Floating-point vector subtract, 2 x double (T2D).
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
15780 
15781 // --------------------------------- MUL --------------------------------------
15782 
// Integer vector multiply, 4 x short (length-2 vectors share this T4H form).
// Note: only short/int element multiplies are defined here; no MulVB/MulVL
// rules appear in this section.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Integer vector multiply, 8 x short (T8H).
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Integer vector multiply, 2 x int (T2S).
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}

// Integer vector multiply, 4 x int (T4S).
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}

// Floating-point vector multiply, 2 x float (T2S).
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Floating-point vector multiply, 4 x float (T4S).
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Floating-point vector multiply, 2 x double (T2D).
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
15881 
15882 // --------------------------------- MLA --------------------------------------
15883 
// Vector multiply-accumulate, 4 x short: dst += src1 * src2.
// The match pattern AddVS(dst, MulVS(src1, src2)) makes dst both a source
// (accumulator) and the destination, which is exactly what mla requires.
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Vector multiply-accumulate, 8 x short: dst += src1 * src2 (T8H).
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Vector multiply-accumulate, 2 x int: dst += src1 * src2 (T2S).
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Vector multiply-accumulate, 4 x int: dst += src1 * src2 (T4S).
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
15940 
15941 // --------------------------------- MLS --------------------------------------
15942 
// Vector multiply-subtract, 4 x short: dst -= src1 * src2.
// The match pattern SubVS(dst, MulVS(src1, src2)) makes dst both the
// accumulator and the destination, matching mls semantics.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Vector multiply-subtract, 8 x short: dst -= src1 * src2 (T8H).
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}

// Vector multiply-subtract, 2 x int: dst -= src1 * src2 (T2S).
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}

// Vector multiply-subtract, 4 x int: dst -= src1 * src2 (T4S).
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
15999 
16000 // --------------------------------- DIV --------------------------------------
16001 
// Vector divide is defined for floating-point elements only (DivVF/DivVD);
// there is no integer vector divide in this section.

// Floating-point vector divide, 2 x float (T2S).
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}

// Floating-point vector divide, 4 x float (T4S).
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}

// Floating-point vector divide, 2 x double (T2D).
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
16043 
16044 // --------------------------------- SQRT -------------------------------------
16045 
// Floating-point vector square root, 2 x double (T2D).
// NOTE(review): unlike the neighbouring rules this one specifies no
// ins_cost, so the ADLC default cost applies — confirm this is intended.
instruct vsqrt2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SqrtVD src));
  format %{ "fsqrt  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ fsqrt(as_FloatRegister($dst$$reg), __ T2D,
             as_FloatRegister($src$$reg));
  %}
  ins_pipe(vsqrt_fp128);
%}
16057 
16058 // --------------------------------- ABS --------------------------------------
16059 
// Floating-point vector absolute value, 2 x float (T2S).
instruct vabs2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Floating-point vector absolute value, 4 x float (T4S).
instruct vabs4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AbsVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Floating-point vector absolute value, 2 x double (T2D).
instruct vabs2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fabs  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fabs(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16098 
16099 // --------------------------------- NEG --------------------------------------
16100 
// Floating-point vector negate, 2 x float (T2S).
instruct vneg2F(vecD dst, vecD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp64);
%}

// Floating-point vector negate, 4 x float (T4S).
instruct vneg4F(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (NegVF src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (4S)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}

// Floating-point vector negate, 2 x double (T2D).
instruct vneg2D(vecX dst, vecX src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (NegVD src));
  ins_cost(INSN_COST * 3);
  format %{ "fneg  $dst,$src\t# vector (2D)" %}
  ins_encode %{
    __ fneg(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg));
  %}
  ins_pipe(vunop_fp128);
%}
16139 
16140 // --------------------------------- AND --------------------------------------
16141 
// Bitwise AND, 64-bit vector.  Bitwise ops are element-type agnostic, so
// the predicate selects on total byte length rather than element count.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise AND, 128-bit vector (T16B).
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16170 
16171 // --------------------------------- OR ---------------------------------------
16172 
// Bitwise OR, 64-bit vector (byte-length predicate: bitwise ops are
// element-type agnostic).
// Fix: the format string previously printed "and" although the rule emits
// an orr instruction — a copy-paste from vand8B.  vor16B already prints
// "orr"; this makes the 64-bit rule's disassembly output consistent.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
16187 
// Bitwise OR, 128-bit vector (T16B).
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16201 
16202 // --------------------------------- XOR --------------------------------------
16203 
// Bitwise XOR, 64-bit vector (emitted as AArch64 eor; byte-length
// predicate since bitwise ops are element-type agnostic).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}

// Bitwise XOR, 128-bit vector (eor, T16B).
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
16232 
16233 // ------------------------------ Shift ---------------------------------------
16234 
// Materialize a left-shift count vector: dup replicates the GP-register
// count into every byte lane of a 128-bit vector.
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (LShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}

// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
// Materialize a right-shift count vector: dup then negr, producing the
// negated count that sshl/ushl interpret as a right shift.
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count (vecX)\n\tneg  $dst, $dst\t T16B" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
    __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
16254 
// Variable shift, 8 x byte.  Both LShiftVB and RShiftVB map to sshl: the
// shift-count vector for right shifts was negated by vshiftcntR above, and
// sshl with a negative count performs the (arithmetic) right shift.
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable shift (left or arithmetic right via negated count), 16 x byte.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}

// Variable logical (unsigned) right shift, 8 x byte: ushl with the negated
// count from vshiftcntR.
instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}

// Variable logical (unsigned) right shift, 16 x byte.
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16310 
// Immediate left shift, 8 x byte.  The count is masked to 31; a count of
// 8 or more shifts out every bit of a byte element, so the result is
// zeroed by eor'ing src with itself instead of emitting an illegal shl.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate left shift, 16 x byte (same zeroing trick for counts >= 8).
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16349 
// Immediate arithmetic right shift, 8 x byte.  Counts >= 8 are clamped to
// 7 (an arithmetic shift by element-width-1 replicates the sign bit, the
// correct saturating behavior).
// NOTE(review): the count is then passed as (-sh & 7); this appears to be
// the raw immediate-field value the assembler's sshr expects (shift-right
// immediates are encoded as 2*width - shift) — confirm against
// assembler_aarch64.hpp before changing.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}

// Immediate arithmetic right shift, 16 x byte (same clamping and count
// convention as vsra8B_imm).
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16380 
// Vector logical (unsigned) right shift by immediate, bytes in a 64-bit vector.
// Also matched for 4-byte vectors; the unused upper lanes carry don't-care values.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // shift count masked to 5 bits
    if (sh >= 8) {
      // Logical shift by >= lane width yields zero: clear dst with eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);  // negated-count encoding convention
    }
  %}
  ins_pipe(vshift64_imm);
%}
16400 
// Vector logical (unsigned) right shift by immediate, 16 x byte (128-bit vector).
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // shift count masked to 5 bits
    if (sh >= 8) {
      // Logical shift by >= lane width yields zero: clear dst with eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);  // negated-count encoding convention
    }
  %}
  ins_pipe(vshift128_imm);
%}
16419 
// Vector shift of shorts, count taken from a vector register (64-bit vector).
// SSHL shifts left for positive lane counts and right for negative ones, which
// is why both LShiftVS and RShiftVS match here; presumably the shift vector is
// pre-negated for the right-shift case by the node that materializes the count.
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16434 
// Vector shift of 8 x short, count from a vector register (128-bit vector).
// Both shift directions map to SSHL; negative lane counts shift right.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16448 
// Vector unsigned right shift of shorts, count from a vector register (64-bit vector).
// USHL with negative lane counts performs the logical right shift.
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
16462 
// Vector unsigned right shift of 8 x short, count from a vector register.
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16475 
// Vector shift left by immediate, shorts in a 64-bit vector.
// Also matched for 2-short vectors; unused upper lanes are don't-care.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // shift count masked to 5 bits
    if (sh >= 16) {
      // Left shift by >= lane width yields zero: clear dst with eor src,src.
      // T8B arrangement is fine -- it covers the whole 64-bit register.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
16495 
// Vector shift left by immediate, 8 x short (128-bit vector).
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // shift count masked to 5 bits
    if (sh >= 16) {
      // Left shift by >= lane width yields zero: clear dst with eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
16514 
// Vector arithmetic right shift by immediate, shorts in a 64-bit vector.
// Also matched for 2-short vectors; unused upper lanes are don't-care.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // shift count masked to 5 bits
    if (sh >= 16) sh = 15;                // saturate: shift >= lane width fills lanes with sign bits
    sh = -sh & 15;                        // negated-count encoding convention (see vsra8B_imm)
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
16530 
// Vector arithmetic right shift by immediate, 8 x short (128-bit vector).
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // shift count masked to 5 bits
    if (sh >= 16) sh = 15;                // saturate: shift >= lane width fills lanes with sign bits
    sh = -sh & 15;                        // negated-count encoding convention
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
16545 
// Vector logical right shift by immediate, shorts in a 64-bit vector.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // shift count masked to 5 bits
    if (sh >= 16) {
      // Logical shift by >= lane width yields zero: clear dst with eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);  // negated-count encoding convention
    }
  %}
  ins_pipe(vshift64_imm);
%}
16565 
// Vector logical right shift by immediate, 8 x short (128-bit vector).
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;  // shift count masked to 5 bits
    if (sh >= 16) {
      // Logical shift by >= lane width yields zero: clear dst with eor src,src.
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);  // negated-count encoding convention
    }
  %}
  ins_pipe(vshift128_imm);
%}
16584 
// Vector shift of 2 x int, count taken from a vector register (64-bit vector).
// SSHL shifts left for positive lane counts and right for negative ones, which
// is why both LShiftVI and RShiftVI match here.
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  // Fix: this is a register-count shift, so it belongs to the vshift64
  // pipeline class (as vsll4S/vsrl4S do), not the immediate class vshift64_imm.
  ins_pipe(vshift64);
%}
16598 
// Vector shift of 4 x int, count taken from a vector register (128-bit vector).
// Both shift directions map to SSHL; negative lane counts shift right.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  // Fix: register-count shift belongs to the vshift128 pipeline class
  // (as vsll8S/vsll2L do), not the immediate class vshift128_imm.
  ins_pipe(vshift128);
%}
16612 
// Vector unsigned right shift of 2 x int, count from a vector register.
// USHL with negative lane counts performs the logical right shift.
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  // Fix: register-count shift belongs to the vshift64 pipeline class
  // (as vsrl4S does), not the immediate class vshift64_imm.
  ins_pipe(vshift64);
%}
16625 
// Vector unsigned right shift of 4 x int, count from a vector register.
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  // Fix: register-count shift belongs to the vshift128 pipeline class
  // (as vsrl8S/vsrl2L do), not the immediate class vshift128_imm.
  ins_pipe(vshift128);
%}
16638 
// Vector shift left by immediate, 2 x int (64-bit vector).
// No >= lane-width clamp is needed: the count is masked to 0..31, which is
// always a valid shift for 32-bit lanes.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
16651 
// Vector shift left by immediate, 4 x int (128-bit vector).
// Count masked to 0..31 is always valid for 32-bit lanes, so no clamp needed.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
16664 
// Vector arithmetic right shift by immediate, 2 x int (64-bit vector).
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);  // negated-count encoding convention (see vsra8B_imm)
  %}
  ins_pipe(vshift64_imm);
%}
16677 
// Vector arithmetic right shift by immediate, 4 x int (128-bit vector).
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);  // negated-count encoding convention
  %}
  ins_pipe(vshift128_imm);
%}
16690 
// Vector logical right shift by immediate, 2 x int (64-bit vector).
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);  // negated-count encoding convention
  %}
  ins_pipe(vshift64_imm);
%}
16703 
// Vector logical right shift by immediate, 4 x int (128-bit vector).
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);  // negated-count encoding convention
  %}
  ins_pipe(vshift128_imm);
%}
16716 
// Vector shift of 2 x long, count taken from a vector register (128-bit vector).
// Both shift directions map to SSHL; negative lane counts shift right.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16730 
// Vector unsigned right shift of 2 x long, count from a vector register.
// USHL with negative lane counts performs the logical right shift.
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
16743 
// Vector shift left by immediate, 2 x long (128-bit vector).
// Count masked to 0..63 is always valid for 64-bit lanes, so no clamp needed.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  // Fix: immediate-count shift belongs to the vshift128_imm pipeline class,
  // consistent with vsll8S_imm/vsra2L_imm/vsrl2L_imm, not register-class vshift128.
  ins_pipe(vshift128_imm);
%}
16756 
// Vector arithmetic right shift by immediate, 2 x long (128-bit vector).
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);  // negated-count encoding convention (see vsra8B_imm)
  %}
  ins_pipe(vshift128_imm);
%}
16769 
// Vector logical right shift by immediate, 2 x long (128-bit vector).
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);  // negated-count encoding convention
  %}
  ins_pipe(vshift128_imm);
%}
16782 
16783 //----------PEEPHOLE RULES-----------------------------------------------------
16784 // These must follow all instruction definitions as they use the names
16785 // defined in the instructions definitions.
16786 //
16787 // peepmatch ( root_instr_name [preceding_instruction]* );
16788 //
16789 // peepconstraint %{
16790 // (instruction_number.operand_name relational_op instruction_number.operand_name
16791 //  [, ...] );
16792 // // instruction numbers are zero-based using left to right order in peepmatch
16793 //
16794 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
16795 // // provide an instruction_number.operand_name for each operand that appears
16796 // // in the replacement instruction's match rule
16797 //
16798 // ---------VM FLAGS---------------------------------------------------------
16799 //
16800 // All peephole optimizations can be turned off using -XX:-OptoPeephole
16801 //
16802 // Each peephole rule is given an identifying number starting with zero and
16803 // increasing by one in the order seen by the parser.  An individual peephole
16804 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
16805 // on the command-line.
16806 //
16807 // ---------CURRENT LIMITATIONS----------------------------------------------
16808 //
16809 // Only match adjacent instructions in same basic block
16810 // Only equality constraints
16811 // Only constraints between operands, not (0.dest_reg == RAX_enc)
16812 // Only one replacement instruction
16813 //
16814 // ---------EXAMPLE----------------------------------------------------------
16815 //
16816 // // pertinent parts of existing instructions in architecture description
16817 // instruct movI(iRegINoSp dst, iRegI src)
16818 // %{
16819 //   match(Set dst (CopyI src));
16820 // %}
16821 //
16822 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
16823 // %{
16824 //   match(Set dst (AddI dst src));
16825 //   effect(KILL cr);
16826 // %}
16827 //
16828 // // Change (inc mov) to lea
16829 // peephole %{
//   // increment preceded by register-register move
16831 //   peepmatch ( incI_iReg movI );
16832 //   // require that the destination register of the increment
16833 //   // match the destination register of the move
16834 //   peepconstraint ( 0.dst == 1.dst );
16835 //   // construct a replacement instruction that sets
16836 //   // the destination to ( move's source register + one )
16837 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
16838 // %}
16839 //
16840 
16841 // Implementation no longer uses movX instructions since
16842 // machine-independent system no longer uses CopyX nodes.
16843 //
16844 // peephole
16845 // %{
16846 //   peepmatch (incI_iReg movI);
16847 //   peepconstraint (0.dst == 1.dst);
16848 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16849 // %}
16850 
16851 // peephole
16852 // %{
16853 //   peepmatch (decI_iReg movI);
16854 //   peepconstraint (0.dst == 1.dst);
16855 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16856 // %}
16857 
16858 // peephole
16859 // %{
16860 //   peepmatch (addI_iReg_imm movI);
16861 //   peepconstraint (0.dst == 1.dst);
16862 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
16863 // %}
16864 
16865 // peephole
16866 // %{
16867 //   peepmatch (incL_iReg movL);
16868 //   peepconstraint (0.dst == 1.dst);
16869 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16870 // %}
16871 
16872 // peephole
16873 // %{
16874 //   peepmatch (decL_iReg movL);
16875 //   peepconstraint (0.dst == 1.dst);
16876 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16877 // %}
16878 
16879 // peephole
16880 // %{
16881 //   peepmatch (addL_iReg_imm movL);
16882 //   peepconstraint (0.dst == 1.dst);
16883 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
16884 // %}
16885 
16886 // peephole
16887 // %{
16888 //   peepmatch (addP_iReg_imm movP);
16889 //   peepconstraint (0.dst == 1.dst);
16890 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
16891 // %}
16892 
16893 // // Change load of spilled value to only a spill
16894 // instruct storeI(memory mem, iRegI src)
16895 // %{
16896 //   match(Set mem (StoreI mem src));
16897 // %}
16898 //
16899 // instruct loadI(iRegINoSp dst, memory mem)
16900 // %{
16901 //   match(Set dst (LoadI mem));
16902 // %}
16903 //
16904 
16905 //----------SMARTSPILL RULES---------------------------------------------------
16906 // These must follow all instruction definitions as they use the names
16907 // defined in the instructions definitions.
16908 
16909 // Local Variables:
16910 // mode: c++
16911 // End: