1 //
   2 // Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
   3 // Copyright (c) 2014, 2019, Red Hat Inc.
   4 // All rights reserved.
   5 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6 //
   7 // This code is free software; you can redistribute it and/or modify it
   8 // under the terms of the GNU General Public License version 2 only, as
   9 // published by the Free Software Foundation.
  10 //
  11 // This code is distributed in the hope that it will be useful, but WITHOUT
  12 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14 // version 2 for more details (a copy is included in the LICENSE file that
  15 // accompanied this code).
  16 //
  17 // You should have received a copy of the GNU General Public License version
  18 // 2 along with this work; if not, write to the Free Software Foundation,
  19 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20 //
  21 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22 // or visit www.oracle.com if you need additional information or have any
  23 // questions.
  24 //
  25 //
  26 
  27 // AArch64 Architecture Description File
  28 
  29 //----------REGISTER DEFINITION BLOCK------------------------------------------
  30 // This information is used by the matcher and the register allocator to
  31 // describe individual registers and classes of registers within the target
// architecture.
  33 
  34 register %{
  35 //----------Architecture Description Register Definitions----------------------
  36 // General Registers
  37 // "reg_def"  name ( register save type, C convention save type,
  38 //                   ideal register type, encoding );
  39 // Register Save Types:
  40 //
  41 // NS  = No-Save:       The register allocator assumes that these registers
  42 //                      can be used without saving upon entry to the method, &
  43 //                      that they do not need to be saved at call sites.
  44 //
  45 // SOC = Save-On-Call:  The register allocator assumes that these registers
  46 //                      can be used without saving upon entry to the method,
  47 //                      but that they must be saved at call sites.
  48 //
  49 // SOE = Save-On-Entry: The register allocator assumes that these registers
  50 //                      must be saved before using them upon entry to the
  51 //                      method, but they do not need to be saved at call
  52 //                      sites.
  53 //
  54 // AS  = Always-Save:   The register allocator assumes that these registers
  55 //                      must be saved before using them upon entry to the
  56 //                      method, & that they must be saved at call sites.
  57 //
  58 // Ideal Register Type is used to determine how to save & restore a
  59 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
  60 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
  61 //
  62 // The encoding number is the actual bit-pattern placed into the opcodes.
  63 
  64 // We must define the 64 bit int registers in two 32 bit halves, the
  65 // real lower register and a virtual upper half register. upper halves
  66 // are used by the register allocator but are not actually supplied as
  67 // operands to memory ops.
  68 //
  69 // follow the C1 compiler in making registers
  70 //
  71 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
  73 //   r8-r9 invisible to the allocator (so we can use them as scratch regs)
  74 //
  75 // as regards Java usage. we don't use any callee save registers
  76 // because this makes it difficult to de-optimise a frame (see comment
  77 // in x86 implementation of Deoptimization::unwind_callee_save_values)
  78 //
  79 
  80 // General Registers
  81 
// r0-r7: Java/C argument and result registers; save-on-call in both
// the Java and the C (platform ABI) conventions.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
// NOTE: r8 and r9 are deliberately not defined here -- they are kept
// invisible to the allocator for use as scratch registers (see the
// comment block above).
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
// r19-r26: SOC for Java code (no callee saves -- see comment above),
// but SOE per the C calling convention.
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// r27-r31: reserved registers, never allocated for Java use.
reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
 142 
 143 // ----------------------------
 144 // Float/Double Registers
 145 // ----------------------------
 146 
 147 // Double Registers
 148 
 149 // The rules of ADL require that double registers be defined in pairs.
 150 // Each pair must be two 32-bit values, but not necessarily a pair of
 151 // single float registers. In each pair, ADLC-assigned register numbers
 152 // must be adjacent, with the lower number even. Finally, when the
 153 // CPU stores such a register pair to memory, the word associated with
 154 // the lower ADLC-assigned number must be stored to the lower address.
 155 
 156 // AArch64 has 32 floating-point registers. Each can store a vector of
 157 // single or double precision floating-point values up to 8 * 32
 158 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
 159 // use the first float or double element of the vector.
 160 
// For Java use, float registers v0-v15 are always save-on-call (even
// though the platform ABI treats v8-v15 as callee-save). Float
// registers v16-v31 are SOC as per the platform spec.
 164 
 165   reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()          );
 166   reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next()  );
 167   reg_def V0_J ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(2) );
 168   reg_def V0_K ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next(3) );
 169 
 170   reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()          );
 171   reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next()  );
 172   reg_def V1_J ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(2) );
 173   reg_def V1_K ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next(3) );
 174 
 175   reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()          );
 176   reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next()  );
 177   reg_def V2_J ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(2) );
 178   reg_def V2_K ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next(3) );
 179 
 180   reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()          );
 181   reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next()  );
 182   reg_def V3_J ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(2) );
 183   reg_def V3_K ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next(3) );
 184 
 185   reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()          );
 186   reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next()  );
 187   reg_def V4_J ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(2) );
 188   reg_def V4_K ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next(3) );
 189 
 190   reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()          );
 191   reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next()  );
 192   reg_def V5_J ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(2) );
 193   reg_def V5_K ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next(3) );
 194 
 195   reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()          );
 196   reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next()  );
 197   reg_def V6_J ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(2) );
 198   reg_def V6_K ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next(3) );
 199 
 200   reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()          );
 201   reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next()  );
 202   reg_def V7_J ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(2) );
 203   reg_def V7_K ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next(3) );
 204 
 205   reg_def V8   ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()          );
 206   reg_def V8_H ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next()  );
 207   reg_def V8_J ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(2) );
 208   reg_def V8_K ( SOC, SOC, Op_RegF,  8, v8->as_VMReg()->next(3) );
 209 
 210   reg_def V9   ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()          );
 211   reg_def V9_H ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next()  );
 212   reg_def V9_J ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(2) );
 213   reg_def V9_K ( SOC, SOC, Op_RegF,  9, v9->as_VMReg()->next(3) );
 214 
 215   reg_def V10  ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()         );
 216   reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() );
 217   reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2));
 218   reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3));
 219 
 220   reg_def V11  ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()         );
 221   reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() );
 222   reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2));
 223   reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3));
 224 
 225   reg_def V12  ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()         );
 226   reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() );
 227   reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2));
 228   reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3));
 229 
 230   reg_def V13  ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()         );
 231   reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() );
 232   reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2));
 233   reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3));
 234 
 235   reg_def V14  ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()         );
 236   reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() );
 237   reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2));
 238   reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3));
 239 
 240   reg_def V15  ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()         );
 241   reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() );
 242   reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2));
 243   reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3));
 244 
 245   reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()         );
 246   reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
 247   reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2));
 248   reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3));
 249 
 250   reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()         );
 251   reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
 252   reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2));
 253   reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3));
 254 
 255   reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()         );
 256   reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
 257   reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2));
 258   reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3));
 259 
 260   reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()         );
 261   reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
 262   reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2));
 263   reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3));
 264 
 265   reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()         );
 266   reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
 267   reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2));
 268   reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3));
 269 
 270   reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()         );
 271   reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
 272   reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2));
 273   reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3));
 274 
 275   reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()         );
 276   reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
 277   reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2));
 278   reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3));
 279 
 280   reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()         );
 281   reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
 282   reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2));
 283   reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3));
 284 
 285   reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()         );
 286   reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
 287   reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2));
 288   reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3));
 289 
 290   reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()         );
 291   reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
 292   reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2));
 293   reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3));
 294 
 295   reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()         );
 296   reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
 297   reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2));
 298   reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3));
 299 
 300   reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()         );
 301   reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
 302   reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2));
 303   reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3));
 304 
 305   reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()         );
 306   reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
 307   reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2));
 308   reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3));
 309 
 310   reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()         );
 311   reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
 312   reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2));
 313   reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3));
 314 
 315   reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()         );
 316   reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
 317   reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2));
 318   reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3));
 319 
 320   reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()         );
 321   reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
 322   reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2));
 323   reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3));
 324 
 325 // ----------------------------
 326 // Special Registers
 327 // ----------------------------
 328 
// the AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. the FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
 334 
 335 reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
 336 
 337 
 338 // Specify priority of register selection within phases of register
 339 // allocation.  Highest priority is first.  A useful heuristic is to
 340 // give registers a low priority when they are required by machine
 341 // instructions, like EAX and EDX on I486, and choose no-save registers
 342 // before save-on-call, & save-on-call before save-on-entry.  Registers
 343 // which participate in fixed calling sequences should come last.
 344 // Registers which are used as pairs must fall on an even boundary.
 345 
// Allocation order for the general registers: scratch/volatile
// registers first, then the Java argument registers, then the
// (ABI-)non-volatile registers, and the reserved registers last.
// This order is the allocator's selection priority (see comment above).
alloc_class chunk0(
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
);
 386 
// Allocation order for the float/SIMD registers: v16-v31 first, then
// the FP argument registers v0-v7, then v8-v15 (which the platform
// ABI treats as callee-save) last.
alloc_class chunk1(

    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
 427 
 428 alloc_class chunk2(RFLAGS);
 429 
 430 //----------Architecture Description Register Classes--------------------------
 431 // Several register classes are automatically defined based upon information in
 432 // this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
 437 //
 438 
 439 // Class for all 32 bit integer registers -- excludes SP which will
 440 // never be used as an integer register
 441 reg_class any_reg32(
 442     R0,
 443     R1,
 444     R2,
 445     R3,
 446     R4,
 447     R5,
 448     R6,
 449     R7,
 450     R10,
 451     R11,
 452     R12,
 453     R13,
 454     R14,
 455     R15,
 456     R16,
 457     R17,
 458     R18,
 459     R19,
 460     R20,
 461     R21,
 462     R22,
 463     R23,
 464     R24,
 465     R25,
 466     R26,
 467     R27,
 468     R28,
 469     R29,
 470     R30
 471 );
 472 
 473 // Singleton class for R0 int register
 474 reg_class int_r0_reg(R0);
 475 
 476 // Singleton class for R2 int register
 477 reg_class int_r2_reg(R2);
 478 
 479 // Singleton class for R3 int register
 480 reg_class int_r3_reg(R3);
 481 
 482 // Singleton class for R4 int register
 483 reg_class int_r4_reg(R4);
 484 
 485 // Class for all long integer registers (including RSP)
 486 reg_class any_reg(
 487     R0, R0_H,
 488     R1, R1_H,
 489     R2, R2_H,
 490     R3, R3_H,
 491     R4, R4_H,
 492     R5, R5_H,
 493     R6, R6_H,
 494     R7, R7_H,
 495     R10, R10_H,
 496     R11, R11_H,
 497     R12, R12_H,
 498     R13, R13_H,
 499     R14, R14_H,
 500     R15, R15_H,
 501     R16, R16_H,
 502     R17, R17_H,
 503     R18, R18_H,
 504     R19, R19_H,
 505     R20, R20_H,
 506     R21, R21_H,
 507     R22, R22_H,
 508     R23, R23_H,
 509     R24, R24_H,
 510     R25, R25_H,
 511     R26, R26_H,
 512     R27, R27_H,
 513     R28, R28_H,
 514     R29, R29_H,
 515     R30, R30_H,
 516     R31, R31_H
 517 );
 518 
 519 // Class for all non-special integer registers
 520 reg_class no_special_reg32(
 521     R0,
 522     R1,
 523     R2,
 524     R3,
 525     R4,
 526     R5,
 527     R6,
 528     R7,
 529     R10,
 530     R11,
 531     R12,                        // rmethod
 532     R13,
 533     R14,
 534     R15,
 535     R16,
 536     R17,
 537     R18,
 538     R19,
 539     R20,
 540     R21,
 541     R22,
 542     R23,
 543     R24,
 544     R25,
 545     R26
 546  /* R27, */                     // heapbase
 547  /* R28, */                     // thread
 548  /* R29, */                     // fp
 549  /* R30, */                     // lr
 550  /* R31 */                      // sp
 551 );
 552 
 553 // Class for all non-special long integer registers
 554 reg_class no_special_reg(
 555     R0, R0_H,
 556     R1, R1_H,
 557     R2, R2_H,
 558     R3, R3_H,
 559     R4, R4_H,
 560     R5, R5_H,
 561     R6, R6_H,
 562     R7, R7_H,
 563     R10, R10_H,
 564     R11, R11_H,
 565     R12, R12_H,                 // rmethod
 566     R13, R13_H,
 567     R14, R14_H,
 568     R15, R15_H,
 569     R16, R16_H,
 570     R17, R17_H,
 571     R18, R18_H,
 572     R19, R19_H,
 573     R20, R20_H,
 574     R21, R21_H,
 575     R22, R22_H,
 576     R23, R23_H,
 577     R24, R24_H,
 578     R25, R25_H,
 579     R26, R26_H,
 580  /* R27, R27_H, */              // heapbase
 581  /* R28, R28_H, */              // thread
 582  /* R29, R29_H, */              // fp
 583  /* R30, R30_H, */              // lr
 584  /* R31, R31_H */               // sp
 585 );
 586 
 587 // Class for 64 bit register r0
 588 reg_class r0_reg(
 589     R0, R0_H
 590 );
 591 
 592 // Class for 64 bit register r1
 593 reg_class r1_reg(
 594     R1, R1_H
 595 );
 596 
 597 // Class for 64 bit register r2
 598 reg_class r2_reg(
 599     R2, R2_H
 600 );
 601 
 602 // Class for 64 bit register r3
 603 reg_class r3_reg(
 604     R3, R3_H
 605 );
 606 
 607 // Class for 64 bit register r4
 608 reg_class r4_reg(
 609     R4, R4_H
 610 );
 611 
 612 // Class for 64 bit register r5
 613 reg_class r5_reg(
 614     R5, R5_H
 615 );
 616 
 617 // Class for 64 bit register r10
 618 reg_class r10_reg(
 619     R10, R10_H
 620 );
 621 
 622 // Class for 64 bit register r11
 623 reg_class r11_reg(
 624     R11, R11_H
 625 );
 626 
 627 // Class for method register
 628 reg_class method_reg(
 629     R12, R12_H
 630 );
 631 
 632 // Class for heapbase register
 633 reg_class heapbase_reg(
 634     R27, R27_H
 635 );
 636 
 637 // Class for thread register
 638 reg_class thread_reg(
 639     R28, R28_H
 640 );
 641 
 642 // Class for frame pointer register
 643 reg_class fp_reg(
 644     R29, R29_H
 645 );
 646 
 647 // Class for link register
 648 reg_class lr_reg(
 649     R30, R30_H
 650 );
 651 
 652 // Class for long sp register
 653 reg_class sp_reg(
 654   R31, R31_H
 655 );
 656 
 657 // Class for all pointer registers
 658 reg_class ptr_reg(
 659     R0, R0_H,
 660     R1, R1_H,
 661     R2, R2_H,
 662     R3, R3_H,
 663     R4, R4_H,
 664     R5, R5_H,
 665     R6, R6_H,
 666     R7, R7_H,
 667     R10, R10_H,
 668     R11, R11_H,
 669     R12, R12_H,
 670     R13, R13_H,
 671     R14, R14_H,
 672     R15, R15_H,
 673     R16, R16_H,
 674     R17, R17_H,
 675     R18, R18_H,
 676     R19, R19_H,
 677     R20, R20_H,
 678     R21, R21_H,
 679     R22, R22_H,
 680     R23, R23_H,
 681     R24, R24_H,
 682     R25, R25_H,
 683     R26, R26_H,
 684     R27, R27_H,
 685     R28, R28_H,
 686     R29, R29_H,
 687     R30, R30_H,
 688     R31, R31_H
 689 );
 690 
 691 // Class for all non_special pointer registers
 692 reg_class no_special_ptr_reg(
 693     R0, R0_H,
 694     R1, R1_H,
 695     R2, R2_H,
 696     R3, R3_H,
 697     R4, R4_H,
 698     R5, R5_H,
 699     R6, R6_H,
 700     R7, R7_H,
 701     R10, R10_H,
 702     R11, R11_H,
 703     R12, R12_H,
 704     R13, R13_H,
 705     R14, R14_H,
 706     R15, R15_H,
 707     R16, R16_H,
 708     R17, R17_H,
 709     R18, R18_H,
 710     R19, R19_H,
 711     R20, R20_H,
 712     R21, R21_H,
 713     R22, R22_H,
 714     R23, R23_H,
 715     R24, R24_H,
 716     R25, R25_H,
 717     R26, R26_H,
 718  /* R27, R27_H, */              // heapbase
 719  /* R28, R28_H, */              // thread
 720  /* R29, R29_H, */              // fp
 721  /* R30, R30_H, */              // lr
 722  /* R31, R31_H */               // sp
 723 );
 724 
 725 // Class for all float registers
 726 reg_class float_reg(
 727     V0,
 728     V1,
 729     V2,
 730     V3,
 731     V4,
 732     V5,
 733     V6,
 734     V7,
 735     V8,
 736     V9,
 737     V10,
 738     V11,
 739     V12,
 740     V13,
 741     V14,
 742     V15,
 743     V16,
 744     V17,
 745     V18,
 746     V19,
 747     V20,
 748     V21,
 749     V22,
 750     V23,
 751     V24,
 752     V25,
 753     V26,
 754     V27,
 755     V28,
 756     V29,
 757     V30,
 758     V31
 759 );
 760 
 761 // Double precision float registers have virtual `high halves' that
 762 // are needed by the allocator.
 763 // Class for all double registers
 764 reg_class double_reg(
 765     V0, V0_H, 
 766     V1, V1_H, 
 767     V2, V2_H, 
 768     V3, V3_H, 
 769     V4, V4_H, 
 770     V5, V5_H, 
 771     V6, V6_H, 
 772     V7, V7_H, 
 773     V8, V8_H, 
 774     V9, V9_H, 
 775     V10, V10_H, 
 776     V11, V11_H, 
 777     V12, V12_H, 
 778     V13, V13_H, 
 779     V14, V14_H, 
 780     V15, V15_H, 
 781     V16, V16_H, 
 782     V17, V17_H, 
 783     V18, V18_H, 
 784     V19, V19_H, 
 785     V20, V20_H, 
 786     V21, V21_H, 
 787     V22, V22_H, 
 788     V23, V23_H, 
 789     V24, V24_H, 
 790     V25, V25_H, 
 791     V26, V26_H, 
 792     V27, V27_H, 
 793     V28, V28_H, 
 794     V29, V29_H, 
 795     V30, V30_H, 
 796     V31, V31_H
 797 );
 798 
 799 // Class for all 64bit vector registers
 800 reg_class vectord_reg(
 801     V0, V0_H,
 802     V1, V1_H,
 803     V2, V2_H,
 804     V3, V3_H,
 805     V4, V4_H,
 806     V5, V5_H,
 807     V6, V6_H,
 808     V7, V7_H,
 809     V8, V8_H,
 810     V9, V9_H,
 811     V10, V10_H,
 812     V11, V11_H,
 813     V12, V12_H,
 814     V13, V13_H,
 815     V14, V14_H,
 816     V15, V15_H,
 817     V16, V16_H,
 818     V17, V17_H,
 819     V18, V18_H,
 820     V19, V19_H,
 821     V20, V20_H,
 822     V21, V21_H,
 823     V22, V22_H,
 824     V23, V23_H,
 825     V24, V24_H,
 826     V25, V25_H,
 827     V26, V26_H,
 828     V27, V27_H,
 829     V28, V28_H,
 830     V29, V29_H,
 831     V30, V30_H,
 832     V31, V31_H
 833 );
 834 
// Class for all 128bit vector registers: each vector register
// occupies four 32 bit allocator slots (V<n>, V<n>_H, V<n>_J, V<n>_K)
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);

// Singleton classes for individual vector registers.
// NOTE(review): only the first two 32 bit slots (V<n>, V<n>_H) are
// listed although the headers say "128 bit" -- presumably only the
// lower 64 bits need tracking here; confirm against the reg_def
// block at the top of the file.

// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 893 
 894 %}
 895 
 896 //----------DEFINITION BLOCK---------------------------------------------------
 897 // Define name --> value mappings to inform the ADLC of an integer valued name
 898 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 899 // Format:
 900 //        int_def  <name>         ( <int_value>, <expression>);
 901 // Generated Code in ad_<arch>.hpp
 902 //        #define  <name>   (<expression>)
 903 //        // value == <int_value>
 904 // Generated code in ad_<arch>.cpp adlc_verification()
 905 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 906 //
 907 
 908 // we follow the ppc-aix port in using a simple cost model which ranks
 909 // register operations as cheap, memory ops as more expensive and
 910 // branches as most expensive. the first two have a low as well as a
 911 // normal cost. huge cost appears to be a way of saying don't do
 912 // something
 913 
// instruction cost constants, per the simple cost model described above
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // branches are twice as expensive as a plain instruction
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // volatile references (ldar/stlr or ldr/str + dmb) are costed highest
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 921 
 922 
 923 //----------SOURCE BLOCK-------------------------------------------------------
 924 // This is a block of C++ code which provides values, functions, and
 925 // definitions necessary in the rest of the architecture description
 926 
 927 source_hpp %{
 928 
 929 class CallStubImpl {
 930  
 931   //--------------------------------------------------------------
 932   //---<  Used for optimization in Compile::shorten_branches  >---
 933   //--------------------------------------------------------------
 934 
 935  public:
 936   // Size of call trampoline stub.
 937   static uint size_call_trampoline() {
 938     return 0; // no call trampolines on this platform
 939   }
 940   
 941   // number of relocations needed by a call trampoline stub
 942   static uint reloc_call_trampoline() { 
 943     return 0; // no call trampolines on this platform
 944   }
 945 };
 946 
class HandlerImpl {

 public:

  // emit the exception handler / deopt handler code for a method;
  // both return the offset of the handler within the code buffer
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // worst-case size of the exception handler: a single far branch
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
  }
};
 964 
  // returns true for any opcode implemented with an exclusive
  // load/store sequence: CompareAndSwapX, GetAndSetX and GetAndAddX
  // (see the implementation further down this file)
  bool is_CAS(int opcode);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling translation of StoreCM
  bool unnecessary_storestore(const Node *storecm);
 983 %}
 984 
 985 source %{
 986 
 987   // Optimizaton of volatile gets and puts
 988   // -------------------------------------
 989   //
 990   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 991   // use to implement volatile reads and writes. For a volatile read
 992   // we simply need
 993   //
 994   //   ldar<x>
 995   //
 996   // and for a volatile write we need
 997   //
 998   //   stlr<x>
 999   // 
1000   // Alternatively, we can implement them by pairing a normal
1001   // load/store with a memory barrier. For a volatile read we need
1002   // 
1003   //   ldr<x>
1004   //   dmb ishld
1005   //
1006   // for a volatile write
1007   //
1008   //   dmb ish
1009   //   str<x>
1010   //   dmb ish
1011   //
1012   // We can also use ldaxr and stlxr to implement compare and swap CAS
1013   // sequences. These are normally translated to an instruction
1014   // sequence like the following
1015   //
1016   //   dmb      ish
1017   // retry:
1018   //   ldxr<x>   rval raddr
1019   //   cmp       rval rold
1020   //   b.ne done
1021   //   stlxr<x>  rval, rnew, rold
1022   //   cbnz      rval retry
1023   // done:
1024   //   cset      r0, eq
1025   //   dmb ishld
1026   //
1027   // Note that the exclusive store is already using an stlxr
1028   // instruction. That is required to ensure visibility to other
1029   // threads of the exclusive write (assuming it succeeds) before that
1030   // of any subsequent writes.
1031   //
1032   // The following instruction sequence is an improvement on the above
1033   //
1034   // retry:
1035   //   ldaxr<x>  rval raddr
1036   //   cmp       rval rold
1037   //   b.ne done
1038   //   stlxr<x>  rval, rnew, rold
1039   //   cbnz      rval retry
1040   // done:
1041   //   cset      r0, eq
1042   //
1043   // We don't need the leading dmb ish since the stlxr guarantees
1044   // visibility of prior writes in the case that the swap is
1045   // successful. Crucially we don't have to worry about the case where
1046   // the swap is not successful since no valid program should be
1047   // relying on visibility of prior changes by the attempting thread
1048   // in the case where the CAS fails.
1049   //
1050   // Similarly, we don't need the trailing dmb ishld if we substitute
1051   // an ldaxr instruction since that will provide all the guarantees we
1052   // require regarding observation of changes made by other threads
1053   // before any change to the CAS address observed by the load.
1054   //
1055   // In order to generate the desired instruction sequence we need to
1056   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
1058   // writes or CAS operations and ii) do not occur through any other
1059   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
1061   // sequences to the desired machine code sequences. Selection of the
1062   // alternative rules can be implemented by predicates which identify
1063   // the relevant node sequences.
1064   //
1065   // The ideal graph generator translates a volatile read to the node
1066   // sequence
1067   //
1068   //   LoadX[mo_acquire]
1069   //   MemBarAcquire
1070   //
1071   // As a special case when using the compressed oops optimization we
1072   // may also see this variant
1073   //
1074   //   LoadN[mo_acquire]
1075   //   DecodeN
1076   //   MemBarAcquire
1077   //
1078   // A volatile write is translated to the node sequence
1079   //
1080   //   MemBarRelease
1081   //   StoreX[mo_release] {CardMark}-optional
1082   //   MemBarVolatile
1083   //
1084   // n.b. the above node patterns are generated with a strict
1085   // 'signature' configuration of input and output dependencies (see
1086   // the predicates below for exact details). The card mark may be as
1087   // simple as a few extra nodes or, in a few GC configurations, may
1088   // include more complex control flow between the leading and
1089   // trailing memory barriers. However, whatever the card mark
1090   // configuration these signatures are unique to translated volatile
1091   // reads/stores -- they will not appear as a result of any other
1092   // bytecode translation or inlining nor as a consequence of
1093   // optimizing transforms.
1094   //
1095   // We also want to catch inlined unsafe volatile gets and puts and
1096   // be able to implement them using either ldar<x>/stlr<x> or some
1097   // combination of ldr<x>/stlr<x> and dmb instructions.
1098   //
1099   // Inlined unsafe volatiles puts manifest as a minor variant of the
1100   // normal volatile put node sequence containing an extra cpuorder
1101   // membar
1102   //
1103   //   MemBarRelease
1104   //   MemBarCPUOrder
1105   //   StoreX[mo_release] {CardMark}-optional
1106   //   MemBarVolatile
1107   //
1108   // n.b. as an aside, the cpuorder membar is not itself subject to
1109   // matching and translation by adlc rules.  However, the rule
1110   // predicates need to detect its presence in order to correctly
1111   // select the desired adlc rules.
1112   //
1113   // Inlined unsafe volatile gets manifest as a somewhat different
1114   // node sequence to a normal volatile get
1115   //
1116   //   MemBarCPUOrder
1117   //        ||       \\
1118   //   MemBarAcquire LoadX[mo_acquire]
1119   //        ||
1120   //   MemBarCPUOrder
1121   //
1122   // In this case the acquire membar does not directly depend on the
1123   // load. However, we can be sure that the load is generated from an
1124   // inlined unsafe volatile get if we see it dependent on this unique
1125   // sequence of membar nodes. Similarly, given an acquire membar we
1126   // can know that it was added because of an inlined unsafe volatile
1127   // get if it is fed and feeds a cpuorder membar and if its feed
1128   // membar also feeds an acquiring load.
1129   //
1130   // Finally an inlined (Unsafe) CAS operation is translated to the
1131   // following ideal graph
1132   //
1133   //   MemBarRelease
1134   //   MemBarCPUOrder
1135   //   CompareAndSwapX {CardMark}-optional
1136   //   MemBarCPUOrder
1137   //   MemBarAcquire
1138   //
1139   // So, where we can identify these volatile read and write
1140   // signatures we can choose to plant either of the above two code
1141   // sequences. For a volatile read we can simply plant a normal
1142   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
1143   // also choose to inhibit translation of the MemBarAcquire and
1144   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
1145   //
1146   // When we recognise a volatile store signature we can choose to
1147   // plant at a dmb ish as a translation for the MemBarRelease, a
1148   // normal str<x> and then a dmb ish for the MemBarVolatile.
1149   // Alternatively, we can inhibit translation of the MemBarRelease
1150   // and MemBarVolatile and instead plant a simple stlr<x>
1151   // instruction.
1152   //
1153   // when we recognise a CAS signature we can choose to plant a dmb
1154   // ish as a translation for the MemBarRelease, the conventional
1155   // macro-instruction sequence for the CompareAndSwap node (which
1156   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
1157   // Alternatively, we can elide generation of the dmb instructions
1158   // and plant the alternative CompareAndSwap macro-instruction
1159   // sequence (which uses ldaxr<x>).
1160   // 
1161   // Of course, the above only applies when we see these signature
1162   // configurations. We still want to plant dmb instructions in any
1163   // other cases where we may see a MemBarAcquire, MemBarRelease or
1164   // MemBarVolatile. For example, at the end of a constructor which
1165   // writes final/volatile fields we will see a MemBarRelease
1166   // instruction and this needs a 'dmb ish' lest we risk the
1167   // constructed object being visible without making the
1168   // final/volatile field writes visible.
1169   //
1170   // n.b. the translation rules below which rely on detection of the
1171   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
1172   // If we see anything other than the signature configurations we
1173   // always just translate the loads and stores to ldr<x> and str<x>
1174   // and translate acquire, release and volatile membars to the
1175   // relevant dmb instructions.
1176   //
1177 
  // is_CAS(int opcode)
  //
  // return true if opcode is one of the atomic load/store operations
  // handled here -- CompareAndSwapX, GetAndSetX or GetAndAddX --
  // otherwise false. n.b. despite the name this covers all exclusive
  // access operations, not just compare and swap.

  bool is_CAS(int opcode)
  {
    switch(opcode) {
    // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
    default:
      return false;
    }
  }
1202 
1203 // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
1204 
1205 bool unnecessary_acquire(const Node *barrier)
1206 {
1207   assert(barrier->is_MemBar(), "expecting a membar");
1208 
1209   if (UseBarriersForVolatile) {
1210     // we need to plant a dmb
1211     return false;
1212   }
1213 
1214   MemBarNode* mb = barrier->as_MemBar();
1215 
1216   if (mb->trailing_load()) {
1217     return true;
1218   }
1219 
1220   if (mb->trailing_load_store()) {
1221     Node* load_store = mb->in(MemBarNode::Precedent);
1222     assert(load_store->is_LoadStore(), "unexpected graph shape");
1223     return is_CAS(load_store->Opcode());
1224   }
1225 
1226   return false;
1227 }
1228 
1229 bool needs_acquiring_load(const Node *n)
1230 {
1231   assert(n->is_Load(), "expecting a load");
1232   if (UseBarriersForVolatile) {
1233     // we use a normal load and a dmb
1234     return false;
1235   }
1236 
1237   LoadNode *ld = n->as_Load();
1238 
1239   return ld->is_acquire();
1240 }
1241 
1242 bool unnecessary_release(const Node *n)
1243 {
1244   assert((n->is_MemBar() &&
1245           n->Opcode() == Op_MemBarRelease),
1246          "expecting a release membar");
1247 
1248   if (UseBarriersForVolatile) {
1249     // we need to plant a dmb
1250     return false;
1251   }
1252 
1253   MemBarNode *barrier = n->as_MemBar();
1254 
1255   if (!barrier->leading()) {
1256     return false;
1257   } else {
1258     Node* trailing = barrier->trailing_membar();
1259     MemBarNode* trailing_mb = trailing->as_MemBar();
1260     assert(trailing_mb->trailing(), "Not a trailing membar?");
1261     assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
1262 
1263     Node* mem = trailing_mb->in(MemBarNode::Precedent);
1264     if (mem->is_Store()) {
1265       assert(mem->as_Store()->is_release(), "");
1266       assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
1267       return true;
1268     } else {
1269       assert(mem->is_LoadStore(), "");
1270       assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
1271       return is_CAS(mem->Opcode());
1272     }
1273   }
1274 
1275   return false;
1276 }
1277 
// Decide whether a trailing MemBarVolatile can be elided because the
// preceding store will be planted as stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  if (UseBarriersForVolatile) {
    // we need to plant a dmb
    return false;
  }

  MemBarNode *mbvol = n->as_MemBar();

  // the membar is redundant exactly when it trails a releasing store
  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // sanity check the leading/trailing membar pairing
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
   }
#endif

  return release;
}
1301 
1302 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
1303 
1304 bool needs_releasing_store(const Node *n)
1305 {
1306   // assert n->is_Store();
1307   if (UseBarriersForVolatile) {
1308     // we use a normal store and dmb combination
1309     return false;
1310   }
1311 
1312   StoreNode *st = n->as_Store();
1313 
1314   return st->trailing_membar() != NULL;
1315 }
1316 
1317 // predicate controlling translation of CAS
1318 //
1319 // returns true if CAS needs to use an acquiring load otherwise false
1320 
1321 bool needs_acquiring_load_exclusive(const Node *n)
1322 {
1323   assert(is_CAS(n->Opcode()), "expecting a compare and swap");
1324   if (UseBarriersForVolatile) {
1325     return false;
1326   }
1327 
1328   LoadStoreNode* ldst = n->as_LoadStore();
1329   assert(ldst->trailing_membar() != NULL, "expected trailing membar");
1330 
1331   // so we can just return true here
1332   return true;
1333 }
1334 
// predicate controlling translation of StoreCM
//
// returns true if a StoreStore barrier is NOT needed before the card
// write, otherwise false

bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");

  // we need to generate a dmb ishst between an object put and the
  // associated card mark when we are using CMS without conditional
  // card marking

  if (UseConcMarkSweepGC && !UseCondCardMark) {
    return false;
  }

  // a storestore is unnecessary in all other cases

  return true;
}
1356 
1357 
1358 #define __ _masm.
1359 
// forward declarations for helper functions to convert register
// indices to register objects
1362 
1363 // the ad file has to provide implementations of certain methods
1364 // expected by the generic code
1365 //
1366 // REQUIRED FUNCTIONALITY
1367 
1368 //=============================================================================
1369 
1370 // !!!!! Special hack to get all types of calls to specify the byte offset
1371 //       from the start of the call to the point where the return address
1372 //       will point.
1373 
1374 int MachCallStaticJavaNode::ret_addr_offset()
1375 {
1376   // call should be a simple bl
1377   // unless this is a method handle invoke in which case it is
1378   // mov(rfp, sp), bl, mov(sp, rfp)
1379   int off = 4;
1380   if (_method_handle_invoke) {
1381     off += 4;
1382   }
1383   return off;
1384 }
1385 
int MachCallDynamicJavaNode::ret_addr_offset()
{
  // four 4-byte instructions precede the return address: a
  // movz/movk/movk sequence materialising the inline cache value,
  // then the bl itself
  return 16; // movz, movk, movk, bl
}
1390 
int MachCallRuntimeNode::ret_addr_offset() {
  // for generated stubs (entry point inside the code cache) the call
  // will be a far branch
  //   bl(addr)
  // for real runtime callouts it will be six instructions
  // see aarch64_enc_java_to_runtime
  //   adr(rscratch2, retaddr)
  //   lea(rscratch1, RuntimeAddress(addr)
  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
  //   blr(rscratch1)
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb) {
    return MacroAssembler::far_branch_size();
  } else {
    return 6 * NativeInstruction::instruction_size;
  }
}
1407 
// Indicate if the safepoint node needs the polling page as an input

// the shared code plants the oop data at the start of the generated
// code for the safepoint node and that needs to be at the load
// instruction itself. so we cannot plant a mov of the safepoint poll
// address followed by a load. setting this to true means the mov is
// scheduled as a prior instruction. that's better for scheduling
// anyway.

bool SafePointNode::needs_polling_address_input()
{
  return true;
}
1421 
1422 //=============================================================================
1423 
// Breakpoint node: emits a single brk instruction.

#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  __ brk(0);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // defer to the generic size computation
  return MachNode::size(ra_);
}
1438 
1439 //=============================================================================
1440 
#ifndef PRODUCT
  // NOTE(review): the format string describes _count as a byte count
  // while emit/size below treat it as an instruction count -- confirm
  // which is intended.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // emit _count nop instructions
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    for (int i = 0; i < _count; i++) {
      __ nop();
    }
  }

  // size is _count instructions
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
1457 
1458 //=============================================================================
1459 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
1460 
1461 int Compile::ConstantTable::calculate_table_base_offset() const {
1462   return 0;  // absolute addressing, no offset
1463 }
1464 
1465 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
1466 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
1467   ShouldNotReachHere();
1468 }
1469 
1470 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
1471   // Empty encoding
1472 }
1473 
1474 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
1475   return 0;
1476 }
1477 
1478 #ifndef PRODUCT
1479 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1480   st->print("-- \t// MachConstantBaseNode (empty encoding)");
1481 }
1482 #endif
1483 
#ifndef PRODUCT
// Pretty-print the prolog. The three frame-size cases below
// presumably mirror the code emitted via MacroAssembler::build_frame
// in MachPrologNode::emit -- confirm the sequences stay in sync.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;

  if (C->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  if (framesize == 0) {
    // Is this even possible?
    st->print("stp  lr, rfp, [sp, #%d]!", -(2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    // small frame: single sub then stp (the bound presumably reflects
    // the stp scaled-immediate offset range -- confirm)
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
  } else {
    // large frame: save the pair first, then drop sp by the
    // remainder via a scratch register
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
}
#endif
1506 
// Emit the method prolog: optional stack bang, frame build, and
// constant table base setup.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  // n.b. frame size includes space for return pc and rfp
  long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->need_stack_bang(framesize))
    __ generate_stack_overflow_check(framesize);

  __ build_frame(framesize);

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  // record where the frame becomes walkable
  C->set_frame_complete(cbuf.insts_size());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1537 
1538 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
1539 {
1540   return MachNode::size(ra_); // too many variables; just compute it
1541                               // the hard way
1542 }
1543 
1544 int MachPrologNode::reloc() const
1545 {
1546   return 0;
1547 }
1548 
1549 //=============================================================================
1550 
#ifndef PRODUCT
// Pretty-print the epilog: frame teardown plus the optional
// return-polling-page touch.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# touch polling page\n\t");
    st->print("mov  rscratch1, #" INTPTR_FORMAT "\n\t", p2i(os::get_polling_page()));
    st->print("ldr zr, [rscratch1]");
  }
}
#endif
1576 
// Emit the method epilog: tear down the frame and, for method
// compilations, read the safepoint polling page before returning.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);
  // frame size in bytes, including the saved rfp/lr pair
  int framesize = C->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (do_polling() && C->is_method_compilation()) {
    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
  }
}
1588 
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// use the default pipeline class for the epilog
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 4;
}
1610 
1611 //=============================================================================
1612 
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map an allocator register name onto its coarse class. The numeric
// bounds depend on the slot layout of the reg_def entries: 30
// allocatable gprs of 2 slots each, then 32 fprs of 4 slots each.
static enum RC rc_class(OptoReg::Name reg) {

  if (reg == OptoReg::Bad) {
    return rc_bad;
  }

  // we have 30 int registers * 2 halves
  // (rscratch1 and rscratch2 are omitted)

  if (reg < 60) {
    return rc_int;
  }

  // we have 32 float registers * 4 slots (V<n> .. V<n>_K, see the
  // vectorx_reg class above); 60 + 32 * 4 = 188
  if (reg < 60 + 128) {
    return rc_float;
  }

  // Between float regs & stack is the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}
1640 
1641 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
1642   Compile* C = ra_->C;
1643 
1644   // Get registers to move.
1645   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
1646   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
1647   OptoReg::Name dst_hi = ra_->get_reg_second(this);
1648   OptoReg::Name dst_lo = ra_->get_reg_first(this);
1649 
1650   enum RC src_hi_rc = rc_class(src_hi);
1651   enum RC src_lo_rc = rc_class(src_lo);
1652   enum RC dst_hi_rc = rc_class(dst_hi);
1653   enum RC dst_lo_rc = rc_class(dst_lo);
1654 
1655   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
1656 
1657   if (src_hi != OptoReg::Bad) {
1658     assert((src_lo&1)==0 && src_lo+1==src_hi &&
1659            (dst_lo&1)==0 && dst_lo+1==dst_hi,
1660            "expected aligned-adjacent pairs");
1661   }
1662 
1663   if (src_lo == dst_lo && src_hi == dst_hi) {
1664     return 0;            // Self copy, no move.
1665   }
1666 
1667   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
1668               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
1669   int src_offset = ra_->reg2offset(src_lo);
1670   int dst_offset = ra_->reg2offset(dst_lo);
1671 
1672   if (bottom_type()->isa_vect() != NULL) {
1673     uint ireg = ideal_reg();
1674     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
1675     if (cbuf) {
1676       MacroAssembler _masm(cbuf);
1677       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
1678       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
1679         // stack->stack
1680         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
1681         if (ireg == Op_VecD) {
1682           __ unspill(rscratch1, true, src_offset);
1683           __ spill(rscratch1, true, dst_offset);
1684         } else {
1685           __ spill_copy128(src_offset, dst_offset);
1686         }
1687       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
1688         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1689                ireg == Op_VecD ? __ T8B : __ T16B,
1690                as_FloatRegister(Matcher::_regEncode[src_lo]));
1691       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
1692         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1693                        ireg == Op_VecD ? __ D : __ Q,
1694                        ra_->reg2offset(dst_lo));
1695       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
1696         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1697                        ireg == Op_VecD ? __ D : __ Q,
1698                        ra_->reg2offset(src_lo));
1699       } else {
1700         ShouldNotReachHere();
1701       }
1702     }
1703   } else if (cbuf) {
1704     MacroAssembler _masm(cbuf);
1705     switch (src_lo_rc) {
1706     case rc_int:
1707       if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
1708         if (is64) {
1709             __ mov(as_Register(Matcher::_regEncode[dst_lo]),
1710                    as_Register(Matcher::_regEncode[src_lo]));
1711         } else {
1712             MacroAssembler _masm(cbuf);
1713             __ movw(as_Register(Matcher::_regEncode[dst_lo]),
1714                     as_Register(Matcher::_regEncode[src_lo]));
1715         }
1716       } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
1717         if (is64) {
1718             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1719                      as_Register(Matcher::_regEncode[src_lo]));
1720         } else {
1721             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1722                      as_Register(Matcher::_regEncode[src_lo]));
1723         }
1724       } else {                    // gpr --> stack spill
1725         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1726         __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
1727       }
1728       break;
1729     case rc_float:
1730       if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
1731         if (is64) {
1732             __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
1733                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1734         } else {
1735             __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
1736                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1737         }
1738       } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
1739           if (cbuf) {
1740             __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1741                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1742         } else {
1743             __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1744                      as_FloatRegister(Matcher::_regEncode[src_lo]));
1745         }
1746       } else {                    // fpr --> stack spill
1747         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1748         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
1749                  is64 ? __ D : __ S, dst_offset);
1750       }
1751       break;
1752     case rc_stack:
1753       if (dst_lo_rc == rc_int) {  // stack --> gpr load
1754         __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
1755       } else if (dst_lo_rc == rc_float) { // stack --> fpr load
1756         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
1757                    is64 ? __ D : __ S, src_offset);
1758       } else {                    // stack --> stack copy
1759         assert(dst_lo_rc == rc_stack, "spill to bad register class");
1760         __ unspill(rscratch1, is64, src_offset);
1761         __ spill(rscratch1, is64, dst_offset);
1762       }
1763       break;
1764     default:
1765       assert(false, "bad rc_class for spill");
1766       ShouldNotReachHere();
1767     }
1768   }
1769 
1770   if (st) {
1771     st->print("spill ");
1772     if (src_lo_rc == rc_stack) {
1773       st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
1774     } else {
1775       st->print("%s -> ", Matcher::regName[src_lo]);
1776     }
1777     if (dst_lo_rc == rc_stack) {
1778       st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
1779     } else {
1780       st->print("%s", Matcher::regName[dst_lo]);
1781     }
1782     if (bottom_type()->isa_vect() != NULL) {
1783       st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128);
1784     } else {
1785       st->print("\t# spill size = %d", is64 ? 64:32);
1786     }
1787   }
1788 
1789   return 0;
1790 
1791 }
1792 
#ifndef PRODUCT
// Debug-only pretty printer for a spill copy.  Without register
// allocation info we can only name the node; otherwise delegate to
// implementation() in "print" mode (NULL CodeBuffer, non-NULL stream).
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif
1801 
// Emit the spill/fill/copy instructions by running implementation()
// in "emit" mode (non-NULL CodeBuffer, NULL stream).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
1805 
// Size in bytes of the emitted spill copy, computed generically by
// MachNode::size (emits into a scratch buffer and measures).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1809 
1810 //=============================================================================
1811 
1812 #ifndef PRODUCT
1813 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1814   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1815   int reg = ra_->get_reg_first(this);
1816   st->print("add %s, rsp, #%d]\t# box lock",
1817             Matcher::regName[reg], offset);
1818 }
1819 #endif
1820 
// Emit the code that materializes the address of the stack lock box
// into the allocated register:  add <reg>, sp, #offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  // Offset of the lock box within the current frame.
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    __ add(as_Register(reg), sp, offset);
  } else {
    // A frame so large that the offset does not fit an add/sub
    // immediate is not expected here; size() below also assumes a
    // single 4-byte instruction is emitted.
    ShouldNotReachHere();
  }
}
1833 
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit() above always produces exactly one instruction (4 bytes).
  return 4;
}
1838 
1839 //=============================================================================
1840 
1841 #ifndef PRODUCT
1842 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1843 {
1844   st->print_cr("# MachUEPNode");
1845   if (UseCompressedClassPointers) {
1846     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1847     if (Universe::narrow_klass_shift() != 0) {
1848       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
1849     }
1850   } else {
1851    st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1852   }
1853   st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
1854   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
1855 }
1856 #endif
1857 
// Emit the unverified entry point: compare the receiver's klass
// (loaded from j_rarg0) against the expected klass and jump to the
// inline-cache miss stub on mismatch.
// NOTE(review): cmp_klass presumably loads the receiver klass via
// rscratch1 and compares against rscratch2 — confirm operand roles in
// MacroAssembler::cmp_klass.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
  Label skip;
  // TODO
  // can we avoid this skip and still use a reloc?
  __ br(Assembler::EQ, skip);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  __ bind(skip);
}
1871 
// Size in bytes of the unverified entry point, computed generically
// by emitting into a scratch buffer.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_);
}
1876 
1877 // REQUIRED EMIT CODE
1878 
1879 //=============================================================================
1880 
1881 // Emit exception handler code.
1882 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
1883 {
1884   // mov rscratch1 #exception_blob_entry_point
1885   // br rscratch1
1886   // Note that the code buffer's insts_mark is always relative to insts.
1887   // That's why we must use the macroassembler to generate a handler.
1888   MacroAssembler _masm(&cbuf);
1889   address base = __ start_a_stub(size_exception_handler());
1890   if (base == NULL) {
1891     ciEnv::current()->record_failure("CodeCache is full");
1892     return 0;  // CodeBuffer::expand failed
1893   }
1894   int offset = __ offset();
1895   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
1896   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
1897   __ end_a_stub();
1898   return offset;
1899 }
1900 
1901 // Emit deopt handler code.
1902 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
1903 {
1904   // Note that the code buffer's insts_mark is always relative to insts.
1905   // That's why we must use the macroassembler to generate a handler.
1906   MacroAssembler _masm(&cbuf);
1907   address base = __ start_a_stub(size_deopt_handler());
1908   if (base == NULL) {
1909     ciEnv::current()->record_failure("CodeCache is full");
1910     return 0;  // CodeBuffer::expand failed
1911   }
1912   int offset = __ offset();
1913 
1914   __ adr(lr, __ pc());
1915   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
1916 
1917   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
1918   __ end_a_stub();
1919   return offset;
1920 }
1921 
1922 // REQUIRED MATCHER CODE
1923 
1924 //=============================================================================
1925 
1926 const bool Matcher::match_rule_supported(int opcode) {
1927 
1928   // TODO 
1929   // identify extra cases that we might want to provide match rules for
1930   // e.g. Op_StrEquals and other intrinsics
1931   if (!has_match_rule(opcode)) {
1932     return false;
1933   }
1934 
1935   return true;  // Per default match rules are supported.
1936 }
1937 
// Not implemented for this port; must not be called.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  Unimplemented();
  return 0;
}
1943 
1944 // Is this branch offset short enough that a short branch can be used?
1945 //
1946 // NOTE: If the platform does not provide any short branch variants, then
1947 //       this method should return false for offset 0.
1948 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1949   // The passed offset is relative to address of the branch.
1950 
1951   return (-32768 <= offset && offset < 32768);
1952 }
1953 
// Whether a 64-bit constant store is as cheap as two 32-bit ones.
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Probably always true, even if a temp register is required.
  return true;
}

// true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
1964 
1965 // Vector width in bytes.
1966 const int Matcher::vector_width_in_bytes(BasicType bt) {
1967   int size = MIN2(16,(int)MaxVectorSize);
1968   // Minimum 2 values in vector
1969   if (size < 2*type2aelembytes(bt)) size = 0;
1970   // But never < 4
1971   if (size < 4) size = 0;
1972   return size;
1973 }
1974 
// Limits on vector size (number of elements) loaded into vector.
// Maximum element count: full vector width divided by element size.
const int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
1979 const int Matcher::min_vector_size(const BasicType bt) {
1980 //  For the moment limit the vector size to 8 bytes
1981     int size = 8 / type2aelembytes(bt);
1982     if (size < 2) size = 2;
1983     return size;
1984 }
1985 
1986 // Vector ideal reg.
1987 const uint Matcher::vector_ideal_reg(int len) {
1988   switch(len) {
1989     case  8: return Op_VecD;
1990     case 16: return Op_VecX;
1991   }
1992   ShouldNotReachHere();
1993   return 0;
1994 }
1995 
1996 const uint Matcher::vector_shift_count_ideal_reg(int size) {
1997   switch(size) {
1998     case  8: return Op_VecD;
1999     case 16: return Op_VecX;
2000   }
2001   ShouldNotReachHere();
2002   return 0;
2003 }
2004 
// AES support not yet implemented
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Misaligned vector stores/loads are permitted unless the AlignVector
// flag forces strict alignment.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
2014 
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 4 * BytesPerLong;

// Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() {
  // long cmoves are no more expensive than int cmoves
  return 0;
}

const int Matcher::float_cmove_cost() {
  // float cmoves are no more expensive than int cmoves
  return 0;
}
2031 
// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with 
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the 
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // Complex addressing is only valid when no shift of the narrow oop
  // is needed (shift == 0).
  return Universe::narrow_oop_shift() == 0;
}

bool Matcher::narrow_klass_use_complex_address() {
// TODO
// decide whether we need to set this to true
  return false;
}
2063 
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// No-op on amd64; not implemented here and must not be called.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during
// deoptimization?
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;
2095 
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers not be
// available to the callee.
// Java arguments are passed in r0-r7 and v0-v7 (both halves of each
// 64-bit pair are listed).
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Any Java-argument register may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
2126 
// No hand-written assembler path for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.
// Must not be called in this port (no divmod projections are used).
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
// Must not be called in this port.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
// Must not be called in this port.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
// Must not be called in this port.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Method handle invokes save/restore SP via the frame pointer's mask.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
2157 
2158 
// Emit a volatile memory access.  INSN is expected to take only a
// (data register, base register) pair, so the macro guarantees that
// the address has no index, scale or displacement component.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                              \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }

// Member-function-pointer signatures used by the loadStore() helpers:
// integer accesses, FP/SIMD scalar accesses, and SIMD vector accesses
// (the latter also carry the SIMD register variant).
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2172 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  static void loadStore(MacroAssembler masm, mem_insn insn,
                         Register reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    // Hooboy, this is fugly.  We need a way to communicate to the
    // encoder that the index needs to be sign extended, so we have to
    // enumerate all the cases.
    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
    case INDINDEXOFFSETI2L:
    case INDINDEXOFFSETI2LN:
      // I2L forms: the index is a 32-bit value used as a 64-bit
      // offset, so sign extend it (sxtw) in the address calculation.
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // No index register: plain base + displacement.
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // base + index + disp is not a single addressing mode; fold
        // base + disp into rscratch1 first.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
2209 
  // FP/SIMD-scalar variant of loadStore() above; same addressing
  // logic with a FloatRegister data operand.
  // NOTE(review): unlike the integer variant, the INDINDEXOFFSETI2L /
  // INDINDEXOFFSETI2LN opcodes are not listed as sign-extended here --
  // confirm that those forms cannot occur for float/double accesses.
  static void loadStore(MacroAssembler masm, mem_float_insn insn,
                         FloatRegister reg, int opcode,
                         Register base, int index, int size, int disp)
  {
    Address::extend scale;

    switch (opcode) {
    case INDINDEXSCALEDOFFSETI2L:
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDOFFSETI2LN:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

     if (index == -1) {
      (masm.*insn)(reg, Address(base, disp));
    } else {
      if (disp == 0) {
        (masm.*insn)(reg, Address(base, as_Register(index), scale));
      } else {
        // Fold base + disp into rscratch1, then apply the index.
        masm.lea(rscratch1, Address(base, disp));
        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
      }
    }
  }
2238 
2239   static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2240                          FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2241                          int opcode, Register base, int index, int size, int disp)
2242   {
2243     if (index == -1) {
2244       (masm.*insn)(reg, T, Address(base, disp));
2245     } else {
2246       assert(disp == 0, "unsupported address mode");
2247       (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2248     }
2249   }
2250 
2251 %}
2252 
2253 
2254 
2255 //----------ENCODING BLOCK-----------------------------------------------------
2256 // This block specifies the encoding classes used by the compiler to
2257 // output byte streams.  Encoding classes are parameterized macros
2258 // used by Machine Instruction Nodes in order to generate the bit
2259 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, &
2262 // COND_INTER.  REG_INTER causes an operand to generate a function
2263 // which returns its register number when queried.  CONST_INTER causes
2264 // an operand to generate a function which returns the value of the
2265 // constant when queried.  MEMORY_INTER causes an operand to generate
2266 // four functions which return the Base Register, the Index Register,
2267 // the Scale Value, and the Offset Value of the operand when queried.
2268 // COND_INTER causes an operand to generate six functions which return
2269 // the encoding code (ie - encoding bits for the instruction)
2270 // associated with each basic boolean condition for a conditional
2271 // instruction.
2272 //
2273 // Instructions specify two basic values for encoding.  Again, a
2274 // function is available to check if the constant displacement is an
2275 // oop. They use the ins_encode keyword to specify their encoding
2276 // classes (which must be a sequence of enc_class names, and their
2277 // parameters, specified in the encoding block), and they use the
2278 // opcode keyword to specify, in order, their primary, secondary, and
2279 // tertiary opcode.  Only the opcode sections which a particular
2280 // instruction needs for encoding need to be specified.
2281 encode %{
2282   // Build emit functions for each basic byte or larger field in the
2283   // intel encoding scheme (opcode, rm, sib, immediate), and call them
2284   // from C++ code in the enc_class source block.  Emit functions will
2285   // live in the main source block for now.  In future, we can
2286   // generalize this by adding a syntax that specifies the sizes of
2287   // fields in an order, so that the adlc can build the emit functions
2288   // automagically
2289 
  // catch all for unimplemented encodings: emits a trap so a missing
  // encoding is caught at runtime rather than silently miscompiling
  enc_class enc_unimplemented %{
    MacroAssembler _masm(&cbuf);
    __ unimplemented("C2 catch all");    
  %}
2295 
  // BEGIN Non-volatile memory access

  // Load encodings.  Each expands to a single load through the
  // loadStore() helpers in the source block; $mem->opcode() is passed
  // along so the helper can decide whether the index register needs
  // sign extension.

  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // FP scalar loads (S = float, D = double registers).

  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector loads: the S/D/Q variant selects the SIMD register width.

  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2399 
  // Store encodings.  The *0 variants store the zero register (zr)
  // instead of taking a source operand.

  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strb0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Zero byte store preceded by a StoreStore barrier.
  enc_class aarch64_enc_strb0_ordered(memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ membar(Assembler::StoreStore);
    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strh0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strw0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_str0(memory mem) %{
    MacroAssembler _masm(&cbuf);
    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // FP scalar stores.

  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Vector stores: the S/D/Q variant selects the SIMD register width.

  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  enc_class aarch64_enc_strvQ(vecX src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
2492 
2493   // END Non-volatile memory access
2494 
2495   // this encoding writes the address of the first instruction in the
2496   // call sequence for the runtime call into the anchor pc slot. this
2497   // address allows the runtime to i) locate the code buffer for the
2498   // caller (any address in the buffer would do) and ii) find the oop
2499   // map associated with the call (has to address the instruction
2500   // following the call). note that we have to store the address which
2501   // follows the actual call.
2502   // 
2503   // the offset from the current pc can be computed by considering
2504   // what gets generated between this point up to and including the
2505   // call. it looks like this
2506   //
2507   //   movz xscratch1 0xnnnn        <-- current pc is here
2508   //   movk xscratch1 0xnnnn
2509   //   movk xscratch1 0xnnnn
2510   //   str xscratch1, [xthread,#anchor_pc_off]
2511   //   mov xscratch2, sp
2512   //   str xscratch2, [xthread,#anchor_sp_off
2513   //   mov x0, x1
2514   //   . . .
2515   //   mov xn-1, xn
2516   //   mov xn, thread            <-- always passed
2517   //   mov xn+1, rfp             <-- optional iff primary == 1
2518   //   movz xscratch1 0xnnnn
2519   //   movk xscratch1 0xnnnn
2520   //   movk xscratch1 0xnnnn
2521   //   blr  xscratch1
2522   //   . . .
2523   //
2524   // where the called routine has n args (including the thread and,
2525   // possibly the stub's caller return address currently in rfp).  we
2526   // can compute n by looking at the number of args passed into the
  // stub. we assert that nargs is <= 8.
2528   //
2529   // so the offset we need to add to the pc (in 32-bit words) is
2530   //   3 +        <-- load 48-bit constant return pc
2531   //   1 +        <-- write anchor pc
2532   //   1 +        <-- copy sp
2533   //   1 +        <-- write anchor sp
2534   //   nargs +    <-- java stub arg count
2535   //   1 +        <-- extra thread arg
2536   // [ 1 + ]      <-- optional ret address of stub caller
2537   //   3 +        <-- load 64 bit call target address
2538   //   1          <-- blr instruction
2539   //
2540   // i.e we need to add (nargs + 11) * 4 bytes or (nargs + 12) * 4 bytes
2541   //
2542 
  enc_class aarch64_enc_save_pc() %{
    Compile* C = ra_->C;
    // count of Java-level stub args; $primary == 1 means the stub also
    // receives the caller's return address (see comment above)
    int nargs = C->tf()->domain()->cnt() - TypeFunc::Parms;
    if ($primary) { nargs++; }
    assert(nargs <= 8, "opto runtime stub has more than 8 args!");
    MacroAssembler _masm(&cbuf);
    address pc = __ pc();
    // distance from here to the instruction after the blr -- must match
    // the instruction-count breakdown in the comment above this enc_class
    int call_offset = (nargs + 11) * 4;
    int field_offset = in_bytes(JavaThread::frame_anchor_offset()) +
                       in_bytes(JavaFrameAnchor::last_Java_pc_offset());
    // record the post-call pc in the thread's frame anchor (last_Java_pc)
    __ lea(rscratch1, InternalAddress(pc + call_offset));
    __ str(rscratch1, Address(rthread, field_offset));
  %}
2556 
2557   // volatile loads and stores
2558 
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    // store-release byte; MOV_VOLATILE resolves the addressing mode and
    // (judging by the bare __ below) establishes _masm in this scope
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
    // extra full barrier for CPUs flagged CPU_DMB_ATOMICS
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
2565 
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    // store-release halfword (see stlrb above for the MOV_VOLATILE/dmb notes)
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
2572 
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    // store-release word, plus trailing dmb on CPU_DMB_ATOMICS CPUs
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
2579 
2580 
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    // load-acquire byte, then sign-extend to 32 bits
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}
2587 
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    // load-acquire byte, then sign-extend to 64 bits
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}
2594 
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    // load-acquire byte (unsigned) into a 32-bit register
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}
2599 
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    // load-acquire byte (unsigned) into a 64-bit register
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}
2604 
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    // load-acquire halfword, then sign-extend to 32 bits
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}
2611 
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    // load-acquire halfword, then sign-extend to 64 bits
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}
2618 
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    // load-acquire halfword (unsigned) into a 32-bit register
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}
2623 
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    // load-acquire halfword (unsigned) into a 64-bit register
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}
2628 
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    // load-acquire word into a 32-bit register
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}
2633 
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    // load-acquire word into a long register
    // NOTE(review): same enc_class name as the iRegI variant above --
    // confirm ADLC resolves the duplicate definition as intended
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}
2638 
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    // load-acquire doubleword
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}
2643 
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    // volatile float load: ldarw into rscratch1, then move bits to the FP reg
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}
2649 
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    // volatile double load: ldar into rscratch1, then move bits to the FP reg
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
2655 
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
        MacroAssembler _masm(&cbuf);
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      // copy sp through rscratch2 since stlr cannot encode sp as data
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
    // extra full barrier for CPUs flagged CPU_DMB_ATOMICS
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
2671 
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    // volatile float store: move bits to rscratch2, then store-release word
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
2683 
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    // volatile double store: move bits to rscratch2, then store-release dword
    {
      MacroAssembler _masm(&cbuf);
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
    if (VM_Version::cpu_cpuFeatures() & VM_Version::CPU_DMB_ATOMICS)
      __ dmb(__ ISH);
  %}
2695 
2696   // synchronized read/update encodings
2697 
2698   enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
2699     MacroAssembler _masm(&cbuf);
2700     Register dst_reg = as_Register($dst$$reg);
2701     Register base = as_Register($mem$$base);
2702     int index = $mem$$index;
2703     int scale = $mem$$scale;
2704     int disp = $mem$$disp;
2705     if (index == -1) {
2706        if (disp != 0) {      
2707         __ lea(rscratch1, Address(base, disp));
2708         __ ldaxr(dst_reg, rscratch1);
2709       } else {
2710         // TODO
2711         // should we ever get anything other than this case?
2712         __ ldaxr(dst_reg, base);
2713       }
2714     } else {
2715       Register index_reg = as_Register(index);
2716       if (disp == 0) {
2717         __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
2718         __ ldaxr(dst_reg, rscratch1);
2719       } else {
2720         __ lea(rscratch1, Address(base, disp));
2721         __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
2722         __ ldaxr(dst_reg, rscratch1);
2723       }
2724     }
2725   %}
2726 
2727   enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
2728     MacroAssembler _masm(&cbuf);
2729     Register src_reg = as_Register($src$$reg);
2730     Register base = as_Register($mem$$base);
2731     int index = $mem$$index;
2732     int scale = $mem$$scale;
2733     int disp = $mem$$disp;
2734     if (index == -1) {
2735        if (disp != 0) {      
2736         __ lea(rscratch2, Address(base, disp));
2737         __ stlxr(rscratch1, src_reg, rscratch2);
2738       } else {
2739         // TODO
2740         // should we ever get anything other than this case?
2741         __ stlxr(rscratch1, src_reg, base);
2742       }
2743     } else {
2744       Register index_reg = as_Register(index);
2745       if (disp == 0) {
2746         __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
2747         __ stlxr(rscratch1, src_reg, rscratch2);
2748       } else {
2749         __ lea(rscratch2, Address(base, disp));
2750         __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
2751         __ stlxr(rscratch1, src_reg, rscratch2);
2752       }
2753     }
2754     __ cmpw(rscratch1, zr);
2755   %}
2756 
2757   enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
2758     MacroAssembler _masm(&cbuf);
2759     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
2760     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
2761                Assembler::xword, /*acquire*/ false, /*release*/ true);
2762   %}
2763 
2764   enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
2765     MacroAssembler _masm(&cbuf);
2766     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
2767     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
2768                Assembler::word, /*acquire*/ false, /*release*/ true);
2769   %}
2770 
2771 
2772   // The only difference between aarch64_enc_cmpxchg and
2773   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
2774   // CompareAndSwap sequence to serve as a barrier on acquiring a
2775   // lock.
2776   enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
2777     MacroAssembler _masm(&cbuf);
2778     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
2779     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
2780                Assembler::xword, /*acquire*/ true, /*release*/ true);
2781   %}
2782 
2783   enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
2784     MacroAssembler _masm(&cbuf);
2785     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
2786     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
2787                Assembler::word, /*acquire*/ true, /*release*/ true);
2788   %}
2789 
2790   // auxiliary used for CompareAndSwapX to set result register
2791   enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
2792     MacroAssembler _masm(&cbuf);
2793     Register res_reg = as_Register($res$$reg);
2794     __ cset(res_reg, Assembler::EQ);
2795   %}
2796 
2797   // prefetch encodings
2798 
2799   enc_class aarch64_enc_prefetchr(memory mem) %{
2800     MacroAssembler _masm(&cbuf);
2801     Register base = as_Register($mem$$base);
2802     int index = $mem$$index;
2803     int scale = $mem$$scale;
2804     int disp = $mem$$disp;
2805     if (index == -1) {
2806       __ prfm(Address(base, disp), PLDL1KEEP);
2807     } else {
2808       Register index_reg = as_Register(index);
2809       if (disp == 0) {
2810         __ prfm(Address(base, index_reg, Address::lsl(scale)), PLDL1KEEP);
2811       } else {
2812         __ lea(rscratch1, Address(base, disp));
2813         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PLDL1KEEP);
2814       }
2815     }
2816   %}
2817 
2818   enc_class aarch64_enc_prefetchw(memory mem) %{
2819     MacroAssembler _masm(&cbuf);
2820     Register base = as_Register($mem$$base);
2821     int index = $mem$$index;
2822     int scale = $mem$$scale;
2823     int disp = $mem$$disp;
2824     if (index == -1) {
2825       __ prfm(Address(base, disp), PSTL1KEEP);
2826     } else {
2827       Register index_reg = as_Register(index);
2828       if (disp == 0) {
2829         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
2830       } else {
2831         __ lea(rscratch1, Address(base, disp));
2832         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
2833       }
2834     }
2835   %}
2836 
  enc_class aarch64_enc_prefetchnta(memory mem) %{
    // streaming prefetch-for-write (PSTL1STRM) at mem's effective address
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1STRM);
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1STRM);
        // NOTE(review): nop pads this path to two instructions, matching
        // the lea+prfm branch below -- presumably for size accounting;
        // confirm before removing (prefetchr/prefetchw have no such pad)
        __ nop();
      } else {
        __ lea(rscratch1, Address(base, disp));
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1STRM);
      }
    }
  %}
2856 
  // mov encodings
2858 
2859   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
2860     MacroAssembler _masm(&cbuf);
2861     u_int32_t con = (u_int32_t)$src$$constant;
2862     Register dst_reg = as_Register($dst$$reg);
2863     if (con == 0) {
2864       __ movw(dst_reg, zr);
2865     } else {
2866       __ movw(dst_reg, con);
2867     }
2868   %}
2869 
2870   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
2871     MacroAssembler _masm(&cbuf);
2872     Register dst_reg = as_Register($dst$$reg);
2873     u_int64_t con = (u_int64_t)$src$$constant;
2874     if (con == 0) {
2875       __ mov(dst_reg, zr);
2876     } else {
2877       __ mov(dst_reg, con);
2878     }
2879   %}
2880 
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    // materialize a pointer constant, dispatching on its reloc type
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL || con == (address)1) {
      // 0 and 1 have dedicated encodings (mov_p0 / mov_p1 below)
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // constants below the first page are emitted directly;
        // larger addresses use pc-relative adrp + add
        if (con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          unsigned long offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
2905 
2906   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
2907     MacroAssembler _masm(&cbuf);
2908     Register dst_reg = as_Register($dst$$reg);
2909     __ mov(dst_reg, zr);
2910   %}
2911 
2912   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
2913     MacroAssembler _masm(&cbuf);
2914     Register dst_reg = as_Register($dst$$reg);
2915     __ mov(dst_reg, (u_int64_t)1);
2916   %}
2917 
2918   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
2919     MacroAssembler _masm(&cbuf);
2920     address page = (address)$src$$constant;
2921     Register dst_reg = as_Register($dst$$reg);
2922     unsigned long off;
2923     __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
2924     assert(off == 0, "assumed offset == 0");
2925   %}
2926 
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    MacroAssembler _masm(&cbuf);
    // card table byte map base is materialized by a dedicated helper
    __ load_byte_map_base($dst$$Register);
  %}
2931 
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    // materialize a narrow (compressed) oop constant
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      // narrow-oop null has its own encoding (mov_n0 below)
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
2944 
2945   enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
2946     MacroAssembler _masm(&cbuf);
2947     Register dst_reg = as_Register($dst$$reg);
2948     __ mov(dst_reg, zr);
2949   %}
2950 
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    // materialize a narrow (compressed) klass constant
    MacroAssembler _masm(&cbuf);
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
2963 
2964   // arithmetic encodings
2965 
2966   enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
2967     MacroAssembler _masm(&cbuf);
2968     Register dst_reg = as_Register($dst$$reg);
2969     Register src_reg = as_Register($src1$$reg);
2970     int32_t con = (int32_t)$src2$$constant;
2971     // add has primary == 0, subtract has primary == 1
2972     if ($primary) { con = -con; }
2973     if (con < 0) {
2974       __ subw(dst_reg, src_reg, -con);
2975     } else {
2976       __ addw(dst_reg, src_reg, con);
2977     }
2978   %}
2979 
2980   enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
2981     MacroAssembler _masm(&cbuf);
2982     Register dst_reg = as_Register($dst$$reg);
2983     Register src_reg = as_Register($src1$$reg);
2984     int32_t con = (int32_t)$src2$$constant;
2985     // add has primary == 0, subtract has primary == 1
2986     if ($primary) { con = -con; }
2987     if (con < 0) {
2988       __ sub(dst_reg, src_reg, -con);
2989     } else {
2990       __ add(dst_reg, src_reg, con);
2991     }
2992   %}
2993 
2994   enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
2995     MacroAssembler _masm(&cbuf);
2996    Register dst_reg = as_Register($dst$$reg);
2997    Register src1_reg = as_Register($src1$$reg);
2998    Register src2_reg = as_Register($src2$$reg);
2999     __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
3000   %}
3001 
3002   enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
3003     MacroAssembler _masm(&cbuf);
3004    Register dst_reg = as_Register($dst$$reg);
3005    Register src1_reg = as_Register($src1$$reg);
3006    Register src2_reg = as_Register($src2$$reg);
3007     __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
3008   %}
3009 
3010   enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
3011     MacroAssembler _masm(&cbuf);
3012    Register dst_reg = as_Register($dst$$reg);
3013    Register src1_reg = as_Register($src1$$reg);
3014    Register src2_reg = as_Register($src2$$reg);
3015     __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
3016   %}
3017 
3018   enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
3019     MacroAssembler _masm(&cbuf);
3020    Register dst_reg = as_Register($dst$$reg);
3021    Register src1_reg = as_Register($src1$$reg);
3022    Register src2_reg = as_Register($src2$$reg);
3023     __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
3024   %}
3025 
3026   // compare instruction encodings
3027 
3028   enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
3029     MacroAssembler _masm(&cbuf);
3030     Register reg1 = as_Register($src1$$reg);
3031     Register reg2 = as_Register($src2$$reg);
3032     __ cmpw(reg1, reg2);
3033   %}
3034 
3035   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
3036     MacroAssembler _masm(&cbuf);
3037     Register reg = as_Register($src1$$reg);
3038     int32_t val = $src2$$constant;
3039     if (val >= 0) {
3040       __ subsw(zr, reg, val);
3041     } else {
3042       __ addsw(zr, reg, -val);
3043     }
3044   %}
3045 
3046   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
3047     MacroAssembler _masm(&cbuf);
3048     Register reg1 = as_Register($src1$$reg);
3049     u_int32_t val = (u_int32_t)$src2$$constant;
3050     __ movw(rscratch1, val);
3051     __ cmpw(reg1, rscratch1);
3052   %}
3053 
3054   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3055     MacroAssembler _masm(&cbuf);
3056     Register reg1 = as_Register($src1$$reg);
3057     Register reg2 = as_Register($src2$$reg);
3058     __ cmp(reg1, reg2);
3059   %}
3060 
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    // 64-bit compare against an add/sub-encodable immediate
    MacroAssembler _masm(&cbuf);
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      // negative but negatable: flip to adds on the magnitude
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: val == -val only for
      // MIN_VALUE, whose negation is itself, so materialize and subtract
      __ orr(rscratch1, zr, (u_int64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}
3075 
3076   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
3077     MacroAssembler _masm(&cbuf);
3078     Register reg1 = as_Register($src1$$reg);
3079     u_int64_t val = (u_int64_t)$src2$$constant;
3080     __ mov(rscratch1, val);
3081     __ cmp(reg1, rscratch1);
3082   %}
3083 
3084   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3085     MacroAssembler _masm(&cbuf);
3086     Register reg1 = as_Register($src1$$reg);
3087     Register reg2 = as_Register($src2$$reg);
3088     __ cmp(reg1, reg2);
3089   %}
3090 
3091   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3092     MacroAssembler _masm(&cbuf);
3093     Register reg1 = as_Register($src1$$reg);
3094     Register reg2 = as_Register($src2$$reg);
3095     __ cmpw(reg1, reg2);
3096   %}
3097 
3098   enc_class aarch64_enc_testp(iRegP src) %{
3099     MacroAssembler _masm(&cbuf);
3100     Register reg = as_Register($src$$reg);
3101     __ cmp(reg, zr);
3102   %}
3103 
3104   enc_class aarch64_enc_testn(iRegN src) %{
3105     MacroAssembler _masm(&cbuf);
3106     Register reg = as_Register($src$$reg);
3107     __ cmpw(reg, zr);
3108   %}
3109 
3110   enc_class aarch64_enc_b(label lbl) %{
3111     MacroAssembler _masm(&cbuf);
3112     Label *L = $lbl$$label;
3113     __ b(*L);
3114   %}
3115 
3116   enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
3117     MacroAssembler _masm(&cbuf);
3118     Label *L = $lbl$$label;
3119     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3120   %}
3121 
3122   enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
3123     MacroAssembler _masm(&cbuf);
3124     Label *L = $lbl$$label;
3125     __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
3126   %}
3127 
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     // slow-path subtype check; flags are set by the helper, and with
     // $primary == 1 the result register is additionally zeroed on the
     // fall-through (non-miss) path
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     MacroAssembler _masm(&cbuf);
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
3145 
  enc_class aarch64_enc_java_static_call(method meth) %{
    MacroAssembler _masm(&cbuf);

    // emit a trampoline call whose reloc type depends on the callee:
    // runtime wrapper, optimized virtual, or plain static call
    address mark = __ pc();
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
    } else if (_optimized_virtual) {
      call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
    } else {
      call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
    }
    // trampoline_call returns NULL when the code cache is exhausted
    if (call == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    if (_method) {
      // Emit stub for static call
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}
3174 
3175   enc_class aarch64_enc_java_handle_call(method meth) %{
3176     MacroAssembler _masm(&cbuf);
3177     relocInfo::relocType reloc;
3178 
3179     // RFP is preserved across all calls, even compiled calls.
3180     // Use it to preserve SP.
3181     __ mov(rfp, sp);
3182 
3183     address mark = __ pc();
3184     address addr = (address)$meth$$method;
3185     address call;
3186     if (!_method) {
3187       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
3188       call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
3189     } else if (_optimized_virtual) {
3190       call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
3191     } else {
3192       call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
3193     }
3194     if (call == NULL) {
3195       ciEnv::current()->record_failure("CodeCache is full"); 
3196       return;
3197     }
3198 
3199     if (_method) {
3200       // Emit stub for static call
3201       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
3202       if (stub == NULL) {
3203         ciEnv::current()->record_failure("CodeCache is full"); 
3204         return;
3205       }
3206     }
3207 
3208     // now restore sp
3209     __ mov(sp, rfp);
3210   %}
3211 
3212   enc_class aarch64_enc_java_dynamic_call(method meth) %{
3213     MacroAssembler _masm(&cbuf);
3214     address call = __ ic_call((address)$meth$$method);
3215     if (call == NULL) {
3216       ciEnv::current()->record_failure("CodeCache is full"); 
3217       return;
3218     }
3219   %}
3220 
3221   enc_class aarch64_enc_call_epilog() %{
3222     MacroAssembler _masm(&cbuf);
3223     if (VerifyStackAtCalls) {
3224       // Check that stack depth is unchanged: find majik cookie on stack
3225       __ call_Unimplemented();
3226     }
3227   %}
3228 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    MacroAssembler _masm(&cbuf);

    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // target is a known blob -- reachable, use a trampoline call
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else {
      Label retaddr;
      __ adr(rscratch2, retaddr);
      __ lea(rscratch1, RuntimeAddress(entry));
      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
      __ blr(rscratch1);
      __ bind(retaddr);
      // pop the two breadcrumb slots pushed above
      __ add(sp, sp, 2 * wordSize);
    }
  %}
3255 
  enc_class aarch64_enc_rethrow() %{
    MacroAssembler _masm(&cbuf);
    // tail-jump to the shared rethrow stub (far_jump: may be out of
    // direct branch range)
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
3260 
  enc_class aarch64_enc_ret() %{
    MacroAssembler _masm(&cbuf);
    // return via the link register
    __ ret(lr);
  %}
3265 
3266   enc_class aarch64_enc_tail_call(iRegP jump_target) %{
3267     MacroAssembler _masm(&cbuf);
3268     Register target_reg = as_Register($jump_target$$reg);
3269     __ br(target_reg);
3270   %}
3271 
3272   enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
3273     MacroAssembler _masm(&cbuf);
3274     Register target_reg = as_Register($jump_target$$reg);
3275     // exception oop should be in r0
3276     // ret addr has been popped into lr
3277     // callee expects it in r3
3278     __ mov(r3, lr);
3279     __ br(target_reg);
3280   %}
3281 
3282   enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
3283     MacroAssembler _masm(&cbuf);
3284     Register oop = as_Register($object$$reg);
3285     Register box = as_Register($box$$reg);
3286     Register disp_hdr = as_Register($tmp$$reg);
3287     Register tmp = as_Register($tmp2$$reg);
3288     Label cont;
3289     Label object_has_monitor;
3290     Label cas_failed;
3291 
3292     assert_different_registers(oop, box, tmp, disp_hdr);
3293 
3294     // Load markOop from object into displaced_header.
3295     __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
3296 
3297     // Always do locking in runtime.
3298     if (EmitSync & 0x01) {
3299       __ cmp(oop, zr);
3300       return;
3301     }
3302     
3303     if (UseBiasedLocking && !UseOptoBiasInlining) {
3304       __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
3305     }
3306 
3307     // Handle existing monitor
3308     if ((EmitSync & 0x02) == 0) {
3309       __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
3310     }
3311 
3312     // Set tmp to be (markOop of object | UNLOCK_VALUE).
3313     __ orr(tmp, disp_hdr, markOopDesc::unlocked_value);
3314 
3315     // Load Compare Value application register.
3316 
3317     // Initialize the box. (Must happen before we update the object mark!)
3318     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3319 
3320     // Compare object markOop with an unlocked value (tmp) and if
3321     // equal exchange the stack address of our box with object markOop.
3322     // On failure disp_hdr contains the possibly locked markOop.
3323     if (UseLSE) {
3324       __ mov(disp_hdr, tmp);
3325       __ casal(Assembler::xword, disp_hdr, box, oop);  // Updates disp_hdr
3326       __ cmp(tmp, disp_hdr);
3327       __ br(Assembler::EQ, cont);
3328     } else {
3329       Label retry_load;
3330       if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
3331         __ prfm(Address(oop), PSTL1STRM);
3332       __ bind(retry_load);
3333       __ ldaxr(disp_hdr, oop);
3334       __ cmp(tmp, disp_hdr);
3335       __ br(Assembler::NE, cas_failed);
3336       // use stlxr to ensure update is immediately visible
3337       __ stlxr(disp_hdr, box, oop);
3338       __ cbzw(disp_hdr, cont);
3339       __ b(retry_load);
3340     }
3341 
3342     assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3343 
3344     // If the compare-and-exchange succeeded, then we found an unlocked
3345     // object, will have now locked it will continue at label cont
3346 
3347     __ bind(cas_failed);
3348     // We did not see an unlocked object so try the fast recursive case.
3349 
3350     // Check if the owner is self by comparing the value in the
3351     // markOop of object (disp_hdr) with the stack pointer.
3352     __ mov(rscratch1, sp);
3353     __ sub(disp_hdr, disp_hdr, rscratch1);
3354     __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOopDesc::lock_mask_in_place));
3355     // If condition is true we are cont and hence we can store 0 as the
3356     // displaced header in the box, which indicates that it is a recursive lock.
3357     __ ands(tmp/*==0?*/, disp_hdr, tmp);
3358     __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3359 
3360     // Handle existing monitor.
3361     if ((EmitSync & 0x02) == 0) {
3362       __ b(cont);
3363 
3364       __ bind(object_has_monitor);
3365       // The object's monitor m is unlocked iff m->owner == NULL,
3366       // otherwise m->owner may contain a thread or a stack address.
3367       //
3368       // Try to CAS m->owner from NULL to current thread.
3369       __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
3370       __ mov(disp_hdr, zr);
3371 
3372       if (UseLSE) {
3373         __ mov(rscratch1, disp_hdr);
3374         __ casal(Assembler::xword, rscratch1, rthread, tmp);
3375         __ cmp(rscratch1, disp_hdr);
3376       } else {
3377         Label retry_load, fail;
3378         if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
3379           __ prfm(Address(tmp), PSTL1STRM);
3380         __ bind(retry_load);
3381         __ ldaxr(rscratch1, tmp);
3382         __ cmp(disp_hdr, rscratch1);
3383         __ br(Assembler::NE, fail);
3384         // use stlxr to ensure update is immediately visible
3385         __ stlxr(rscratch1, rthread, tmp);
3386         __ cbnzw(rscratch1, retry_load);
3387         __ bind(fail);
3388       }
3389 
3390       // Store a non-null value into the box to avoid looking like a re-entrant
3391       // lock. The fast-path monitor unlock code checks for
3392       // markOopDesc::monitor_value so use markOopDesc::unused_mark which has the
3393       // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
3394       __ mov(tmp, (address)markOopDesc::unused_mark());
3395       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3396     }
3397 
3398     __ bind(cont);
3399     // flag == EQ indicates success
3400     // flag == NE indicates failure
3401   %}
3402 
  // Fast-path monitor exit emitted inline by C2.  On fall-through the
  // condition flags carry the result for the matching instruct rule:
  //   flag == EQ -> unlock succeeded
  //   flag == NE -> caller must take the slow path into the runtime
  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
    MacroAssembler _masm(&cbuf);
    // $tmp is used for the displaced header, $tmp2 as general scratch.
    Register oop = as_Register($object$$reg);
    Register box = as_Register($box$$reg);
    Register disp_hdr = as_Register($tmp$$reg);
    Register tmp = as_Register($tmp2$$reg);
    Label cont;
    Label object_has_monitor;

    assert_different_registers(oop, box, tmp, disp_hdr);

    // Always do locking in runtime.
    if (EmitSync & 0x01) {
      __ cmp(oop, zr); // Oop can't be 0 here => always false.
      return;
    }

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      __ biased_locking_exit(oop, tmp, cont);
    }

    // Find the lock address and load the displaced header from the stack.
    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    __ cmp(disp_hdr, zr);
    __ br(Assembler::EQ, cont);

    // Handle existing monitor.
    // The monitor bit is tested on disp_hdr (the saved displaced header),
    // not on the mark word: fast_lock stores markOopDesc::unused_mark(),
    // which has that bit set, into the box when the lock is inflated.
    // tmp is preloaded with the real markOop for the monitor path below.
    if ((EmitSync & 0x02) == 0) {
      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
    }

    // Check if it is still a light weight lock, this is true if we
    // see the stack address of the basicLock in the markOop of the
    // object.

    if (UseLSE) {
      __ mov(tmp, box);
      // casl (release-only semantics): restore the displaced header into
      // the object's mark word iff it still contains our box address.
      __ casl(Assembler::xword, tmp, disp_hdr, oop);
      __ cmp(tmp, box);
      __ b(cont);
    } else {
      Label retry_load;
      if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_STXR_PREFETCH))
        __ prfm(Address(oop), PSTL1STRM);
      __ bind(retry_load);
      __ ldxr(tmp, oop);
      __ cmp(box, tmp);
      __ br(Assembler::NE, cont);
      // use stlxr to ensure update is immediately visible
      __ stlxr(tmp, disp_hdr, oop);
      __ cbzw(tmp, cont);
      __ b(retry_load);
    }

    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

    // Handle existing monitor.
    if ((EmitSync & 0x02) == 0) {
      __ bind(object_has_monitor);
      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
      __ cmp(rscratch1, zr);
      // Not the owner, or recursions != 0 -> slow path (flags == NE).
      __ br(Assembler::NE, cont);

      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
      __ cmp(rscratch1, zr);
      // Threads are queued on the monitor -> slow path (flags == NE).
      __ br(Assembler::NE, cont);
      // need a release store here
      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
      __ stlr(zr, tmp); // set unowned
    }

    __ bind(cont);
    // flag == EQ indicates success
    // flag == NE indicates failure
  %}
3487 
3488 %}
3489 
3490 //----------FRAME--------------------------------------------------------------
3491 // Definition of frame structure and management information.
3492 //
3493 //  S T A C K   L A Y O U T    Allocators stack-slot number
3494 //                             |   (to get allocators register number
3495 //  G  Owned by    |        |  v    add OptoReg::stack0())
3496 //  r   CALLER     |        |
3497 //  o     |        +--------+      pad to even-align allocators stack-slot
3498 //  w     V        |  pad0  |        numbers; owned by CALLER
3499 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
3500 //  h     ^        |   in   |  5
3501 //        |        |  args  |  4   Holes in incoming args owned by SELF
3502 //  |     |        |        |  3
3503 //  |     |        +--------+
3504 //  V     |        | old out|      Empty on Intel, window on Sparc
3505 //        |    old |preserve|      Must be even aligned.
3506 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
3507 //        |        |   in   |  3   area for Intel ret address
3508 //     Owned by    |preserve|      Empty on Sparc.
3509 //       SELF      +--------+
3510 //        |        |  pad2  |  2   pad to align old SP
3511 //        |        +--------+  1
3512 //        |        | locks  |  0
3513 //        |        +--------+----> OptoReg::stack0(), even aligned
3514 //        |        |  pad1  | 11   pad to align new SP
3515 //        |        +--------+
3516 //        |        |        | 10
3517 //        |        | spills |  9   spills
3518 //        V        |        |  8   (pad0 slot for callee)
3519 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
3520 //        ^        |  out   |  7
3521 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
3522 //     Owned by    +--------+
3523 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
3524 //        |    new |preserve|      Must be even-aligned.
3525 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
3526 //        |        |        |
3527 //
3528 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
3529 //         known from SELF's arguments and the Java calling convention.
3530 //         Region 6-7 is determined per call site.
3531 // Note 2: If the calling convention leaves holes in the incoming argument
3532 //         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
3534 //         incoming area, as the Java calling convention is completely under
3535 //         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
3537 //         varargs C calling conventions.
3538 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
3539 //         even aligned with pad0 as needed.
3540 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
3541 //           (the latter is true on Intel but is it false on AArch64?)
3542 //         region 6-11 is even aligned; it may be padded out more so that
3543 //         the region from SP to FP meets the minimum stack alignment.
3544 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3545 //         alignment.  Region 11, pad1, may be dynamically extended so that
3546 //         SP meets the minimum alignment.
3547 
frame %{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R12);

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots. aarch64 needs two slots for
  // return address and fp.
  // TODO think this is correct but check
  in_preserve_stack_slots(4);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              round_to((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between ingoing/outgoing just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
  %}

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // lo/hi give the OptoReg for the low/high half of the return value,
    // indexed by the ideal register opcode.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
3651 
3652 //----------ATTRIBUTES---------------------------------------------------------
3653 //----------Operand Attributes-------------------------------------------------
3654 op_attrib op_cost(1);        // Required cost attribute
3655 
3656 //----------Instruction Attributes---------------------------------------------
3657 ins_attrib ins_cost(INSN_COST); // Required cost attribute
3658 ins_attrib ins_size(32);        // Required size attribute (in bits)
3659 ins_attrib ins_short_branch(0); // Required flag: is this instruction
3660                                 // a non-matching short branch variant
3661                                 // of some long branch?
3662 ins_attrib ins_alignment(4);    // Required alignment attribute (must
3663                                 // be a power of 2) specifies the
3664                                 // alignment that some part of the
3665                                 // instruction (not necessarily the
3666                                 // start) requires.  If > 1, a
3667                                 // compute_padding() function must be
3668                                 // provided for the instruction
3669 
3670 //----------OPERANDS-----------------------------------------------------------
3671 // Operand definitions must precede instruction definitions for correct parsing
3672 // in the ADLC because operands constitute user defined types which are used in
3673 // instruction definitions.
3674 
3675 //----------Simple Operands----------------------------------------------------
3676 
3677 // Integer operands 32 bit
3678 // 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant <= 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// The following byte-multiple constants (8..64) are exact-value
// immediates; presumably they match shift amounts in byte-extract
// patterns -- confirm against the instruct rules that use them.

// 32 bit constant 8
operand immI_8()
%{
  predicate(n->get_int() == 8);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 64
operand immI_64()
%{
  predicate(n->get_int() == 64);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 255 (0xFF byte mask)
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 65535 (0xFFFF halfword mask)
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3830 
// NOTE(review): despite the immL_ prefix, immL_63 and immL_255 match
// ConI and test get_int() -- presumably they describe the int-typed
// shift-count/mask input of a long operation; confirm against the
// instruct rules that use them before renaming.
operand immL_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFF (halfword mask)
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit constant 0xFFFFFFFF (word mask)
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit mask of contiguous low-order one bits: value is 2^k - 1
// (non-zero, with the top two bits clear so k <= 62).
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit mask of contiguous low-order one bits: value is 2^k - 1
// (non-zero, with the top two bits clear so k <= 30).
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3894 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 26 bit signed offset -- for pc-relative branches
operand immI26()
%{
  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 19 bit signed offset -- for pc-relative loads
operand immI19()
%{
  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset -- for base plus immediate loads
operand immIU12()
%{
  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 12 bit unsigned offset (long constant)
operand immLU12()
%{
  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access (shift == 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access (shift == 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access (shift == 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long variants of the offset operands above.
operand immLoffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4029 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
// TODO -- check this is right when e.g the mask is 0x80000000
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit increment
operand immL_1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit offset of pc in thread anchor

operand immL_pc_off()
%{
  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4138 
// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Polling Page Pointer Immediate
operand immPollPage()
%{
  predicate((address)n->get_ptr() == os::get_polling_page());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map
  predicate((jbyte*)n->get_ptr() ==
        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus One
// this is used when we want to write the current PC to the thread anchor
operand immP_M1()
%{
  predicate(n->get_ptr() == -1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate Minus Two
// this is used when we want to write the current PC to the thread anchor
operand immP_M2()
%{
  predicate(n->get_ptr() == -2);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4220 
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'double +0.0'.
operand immD0()
%{
  predicate((n->getd() == 0) &&
            (fpclassify(n->getd()) == FP_ZERO) && (signbit(n->getd()) == 0));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double constant encodable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate).
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'float +0.0'.
operand immF0()
%{
  predicate((n->getf() == 0) &&
            (fpclassify(n->getf()) == FP_ZERO) && (signbit(n->getf()) == 0));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float constant encodable as an FP immediate
// (see Assembler::operand_valid_for_float_immediate).
operand immFPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4312 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike the other register operands this one declares no
// op_cost(); the ADLC default applies -- confirm that is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  //match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4400 
// Fixed-register operands: each constrains allocation to a single
// register, for use by instruct rules with fixed register conventions.

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register FP only
operand iRegP_FP()
%{
  constraint(ALLOC_IN_RC(fp_reg));
  match(RegP);
  // match(iRegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4505 
4506 // Register R2 only
4507 operand iRegI_R2()
4508 %{
4509   constraint(ALLOC_IN_RC(int_r2_reg));
4510   match(RegI);
4511   match(iRegINoSp);
4512   op_cost(0);
4513   format %{ %}
4514   interface(REG_INTER);
4515 %}
4516 
4517 // Register R3 only
4518 operand iRegI_R3()
4519 %{
4520   constraint(ALLOC_IN_RC(int_r3_reg));
4521   match(RegI);
4522   match(iRegINoSp);
4523   op_cost(0);
4524   format %{ %}
4525   interface(REG_INTER);
4526 %}
4527 
4528 
// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4539 
4540 
4541 // Pointer Register Operands
4542 // Narrow Pointer Register
4543 operand iRegN()
4544 %{
4545   constraint(ALLOC_IN_RC(any_reg32));
4546   match(RegN);
4547   match(iRegNNoSp);
4548   op_cost(0);
4549   format %{ %}
4550   interface(REG_INTER);
4551 %}
4552 
// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4562 
4563 // heap base register -- used for encoding immN0
4564 
4565 operand iRegIHeapbase()
4566 %{
4567   constraint(ALLOC_IN_RC(heapbase_reg));
4568   match(RegI);
4569   op_cost(0);
4570   format %{ %}
4571   interface(REG_INTER);
4572 %}
4573 
4574 // Float Register
4575 // Float register operands
4576 operand vRegF()
4577 %{
4578   constraint(ALLOC_IN_RC(float_reg));
4579   match(RegF);
4580 
4581   op_cost(0);
4582   format %{ %}
4583   interface(REG_INTER);
4584 %}
4585 
4586 // Double Register
4587 // Double register operands
4588 operand vRegD()
4589 %{
4590   constraint(ALLOC_IN_RC(double_reg));
4591   match(RegD);
4592 
4593   op_cost(0);
4594   format %{ %}
4595   interface(REG_INTER);
4596 %}
4597 
// Vector Register Operands
// 64 bit vector register
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// 128 bit vector register
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
4617 
4618 operand vRegD_V0()
4619 %{
4620   constraint(ALLOC_IN_RC(v0_reg));
4621   match(RegD);
4622   op_cost(0);
4623   format %{ %}
4624   interface(REG_INTER);
4625 %}
4626 
4627 operand vRegD_V1()
4628 %{
4629   constraint(ALLOC_IN_RC(v1_reg));
4630   match(RegD);
4631   op_cost(0);
4632   format %{ %}
4633   interface(REG_INTER);
4634 %}
4635 
4636 operand vRegD_V2()
4637 %{
4638   constraint(ALLOC_IN_RC(v2_reg));
4639   match(RegD);
4640   op_cost(0);
4641   format %{ %}
4642   interface(REG_INTER);
4643 %}
4644 
4645 operand vRegD_V3()
4646 %{
4647   constraint(ALLOC_IN_RC(v3_reg));
4648   match(RegD);
4649   op_cost(0);
4650   format %{ %}
4651   interface(REG_INTER);
4652 %}
4653 
4654 // Flags register, used as output of signed compare instructions
4655 
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
4658 // that ordered inequality tests use GT, GE, LT or LE none of which
4659 // pass through cases where the result is unordered i.e. one or both
4660 // inputs to the compare is a NaN. this means that the ideal code can
4661 // replace e.g. a GT with an LE and not end up capturing the NaN case
4662 // (where the comparison should always fail). EQ and NE tests are
4663 // always generated in ideal code so that unordered folds into the NE
4664 // case, matching the behaviour of AArch64 NE.
4665 //
4666 // This differs from x86 where the outputs of FP compares use a
4667 // special FP flags registers and where compares based on this
4668 // register are distinguished into ordered inequalities (cmpOpUCF) and
4669 // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
4670 // to explicitly handle the unordered case in branches. x86 also has
4671 // to include extra CMoveX rules to accept a cmpOpUCF input.
4672 
4673 operand rFlagsReg()
4674 %{
4675   constraint(ALLOC_IN_RC(int_flags));
4676   match(RegFlags);
4677 
4678   op_cost(0);
4679   format %{ "RFLAGS" %}
4680   interface(REG_INTER);
4681 %}
4682 
4683 // Flags register, used as output of unsigned compare instructions
4684 operand rFlagsRegU()
4685 %{
4686   constraint(ALLOC_IN_RC(int_flags));
4687   match(RegFlags);
4688 
4689   op_cost(0);
4690   format %{ "RFLAGSU" %}
4691   interface(REG_INTER);
4692 %}
4693 
4694 // Special Registers
4695 
4696 // Method Register
4697 operand inline_cache_RegP(iRegP reg)
4698 %{
4699   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
4700   match(reg);
4701   match(iRegPNoSp);
4702   op_cost(0);
4703   format %{ %}
4704   interface(REG_INTER);
4705 %}
4706 
4707 operand interpreter_method_oop_RegP(iRegP reg)
4708 %{
4709   constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
4710   match(reg);
4711   match(iRegPNoSp);
4712   op_cost(0);
4713   format %{ %}
4714   interface(REG_INTER);
4715 %}
4716 
4717 // Thread Register
4718 operand thread_RegP(iRegP reg)
4719 %{
4720   constraint(ALLOC_IN_RC(thread_reg)); // link_reg
4721   match(reg);
4722   op_cost(0);
4723   format %{ %}
4724   interface(REG_INTER);
4725 %}
4726 
4727 operand lr_RegP(iRegP reg)
4728 %{
4729   constraint(ALLOC_IN_RC(lr_reg)); // link_reg
4730   match(reg);
4731   op_cost(0);
4732   format %{ %}
4733   interface(REG_INTER);
4734 %}
4735 
4736 //----------Memory Operands----------------------------------------------------
4737 
// Indirect memory operand: [$reg], no index register, zero displacement.
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);  // 0xffffffff encodes "no index register"
    scale(0x0);
    disp(0x0);
  %}
%}
4751 
4752 operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
4753 %{
4754   constraint(ALLOC_IN_RC(ptr_reg));
4755   match(AddP (AddP reg (LShiftL lreg scale)) off);
4756   op_cost(INSN_COST);
4757   format %{ "$reg, $lreg lsl($scale), $off" %}
4758   interface(MEMORY_INTER) %{
4759     base($reg);
4760     index($lreg);
4761     scale($scale);
4762     disp($off);
4763   %}
4764 %}
4765 
4766 operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
4767 %{
4768   constraint(ALLOC_IN_RC(ptr_reg));
4769   match(AddP (AddP reg (LShiftL lreg scale)) off);
4770   op_cost(INSN_COST);
4771   format %{ "$reg, $lreg lsl($scale), $off" %}
4772   interface(MEMORY_INTER) %{
4773     base($reg);
4774     index($lreg);
4775     scale($scale);
4776     disp($off);
4777   %}
4778 %}
4779 
4780 operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
4781 %{
4782   constraint(ALLOC_IN_RC(ptr_reg));
4783   match(AddP (AddP reg (ConvI2L ireg)) off);
4784   op_cost(INSN_COST);
4785   format %{ "$reg, $ireg, $off I2L" %}
4786   interface(MEMORY_INTER) %{
4787     base($reg);
4788     index($ireg);
4789     scale(0x0);
4790     disp($off);
4791   %}
4792 %}
4793 
4794 operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
4795 %{
4796   constraint(ALLOC_IN_RC(ptr_reg));
4797   match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
4798   op_cost(INSN_COST);
4799   format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
4800   interface(MEMORY_INTER) %{
4801     base($reg);
4802     index($ireg);
4803     scale($scale);
4804     disp($off);
4805   %}
4806 %}
4807 
4808 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
4809 %{
4810   constraint(ALLOC_IN_RC(ptr_reg));
4811   match(AddP reg (LShiftL (ConvI2L ireg) scale));
4812   op_cost(0);
4813   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
4814   interface(MEMORY_INTER) %{
4815     base($reg);
4816     index($ireg);
4817     scale($scale);
4818     disp(0x0);
4819   %}
4820 %}
4821 
4822 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
4823 %{
4824   constraint(ALLOC_IN_RC(ptr_reg));
4825   match(AddP reg (LShiftL lreg scale));
4826   op_cost(0);
4827   format %{ "$reg, $lreg lsl($scale)" %}
4828   interface(MEMORY_INTER) %{
4829     base($reg);
4830     index($lreg);
4831     scale($scale);
4832     disp(0x0);
4833   %}
4834 %}
4835 
4836 operand indIndex(iRegP reg, iRegL lreg)
4837 %{
4838   constraint(ALLOC_IN_RC(ptr_reg));
4839   match(AddP reg lreg);
4840   op_cost(0);
4841   format %{ "$reg, $lreg" %}
4842   interface(MEMORY_INTER) %{
4843     base($reg);
4844     index($lreg);
4845     scale(0x0);
4846     disp(0x0);
4847   %}
4848 %}
4849 
4850 operand indOffI(iRegP reg, immIOffset off)
4851 %{
4852   constraint(ALLOC_IN_RC(ptr_reg));
4853   match(AddP reg off);
4854   op_cost(0);
4855   format %{ "[$reg, $off]" %}
4856   interface(MEMORY_INTER) %{
4857     base($reg);
4858     index(0xffffffff);
4859     scale(0x0);
4860     disp($off);
4861   %}
4862 %}
4863 
4864 operand indOffI4(iRegP reg, immIOffset4 off)
4865 %{
4866   constraint(ALLOC_IN_RC(ptr_reg));
4867   match(AddP reg off);
4868   op_cost(0);
4869   format %{ "[$reg, $off]" %}
4870   interface(MEMORY_INTER) %{
4871     base($reg);
4872     index(0xffffffff);
4873     scale(0x0);
4874     disp($off);
4875   %}
4876 %}
4877 
4878 operand indOffI8(iRegP reg, immIOffset8 off)
4879 %{
4880   constraint(ALLOC_IN_RC(ptr_reg));
4881   match(AddP reg off);
4882   op_cost(0);
4883   format %{ "[$reg, $off]" %}
4884   interface(MEMORY_INTER) %{
4885     base($reg);
4886     index(0xffffffff);
4887     scale(0x0);
4888     disp($off);
4889   %}
4890 %}
4891 
4892 operand indOffI16(iRegP reg, immIOffset16 off)
4893 %{
4894   constraint(ALLOC_IN_RC(ptr_reg));
4895   match(AddP reg off);
4896   op_cost(0);
4897   format %{ "[$reg, $off]" %}
4898   interface(MEMORY_INTER) %{
4899     base($reg);
4900     index(0xffffffff);
4901     scale(0x0);
4902     disp($off);
4903   %}
4904 %}
4905 
4906 operand indOffL(iRegP reg, immLoffset off)
4907 %{
4908   constraint(ALLOC_IN_RC(ptr_reg));
4909   match(AddP reg off);
4910   op_cost(0);
4911   format %{ "[$reg, $off]" %}
4912   interface(MEMORY_INTER) %{
4913     base($reg);
4914     index(0xffffffff);
4915     scale(0x0);
4916     disp($off);
4917   %}
4918 %}
4919 
4920 operand indOffL4(iRegP reg, immLoffset4 off)
4921 %{
4922   constraint(ALLOC_IN_RC(ptr_reg));
4923   match(AddP reg off);
4924   op_cost(0);
4925   format %{ "[$reg, $off]" %}
4926   interface(MEMORY_INTER) %{
4927     base($reg);
4928     index(0xffffffff);
4929     scale(0x0);
4930     disp($off);
4931   %}
4932 %}
4933 
4934 operand indOffL8(iRegP reg, immLoffset8 off)
4935 %{
4936   constraint(ALLOC_IN_RC(ptr_reg));
4937   match(AddP reg off);
4938   op_cost(0);
4939   format %{ "[$reg, $off]" %}
4940   interface(MEMORY_INTER) %{
4941     base($reg);
4942     index(0xffffffff);
4943     scale(0x0);
4944     disp($off);
4945   %}
4946 %}
4947 
4948 operand indOffL16(iRegP reg, immLoffset16 off)
4949 %{
4950   constraint(ALLOC_IN_RC(ptr_reg));
4951   match(AddP reg off);
4952   op_cost(0);
4953   format %{ "[$reg, $off]" %}
4954   interface(MEMORY_INTER) %{
4955     base($reg);
4956     index(0xffffffff);
4957     scale(0x0);
4958     disp($off);
4959   %}
4960 %}
4961 
4962 operand indirectN(iRegN reg)
4963 %{
4964   predicate(Universe::narrow_oop_shift() == 0);
4965   constraint(ALLOC_IN_RC(ptr_reg));
4966   match(DecodeN reg);
4967   op_cost(0);
4968   format %{ "[$reg]\t# narrow" %}
4969   interface(MEMORY_INTER) %{
4970     base($reg);
4971     index(0xffffffff);
4972     scale(0x0);
4973     disp(0x0);
4974   %}
4975 %}
4976 
4977 operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
4978 %{
4979   predicate(Universe::narrow_oop_shift() == 0);
4980   constraint(ALLOC_IN_RC(ptr_reg));
4981   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
4982   op_cost(0);
4983   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
4984   interface(MEMORY_INTER) %{
4985     base($reg);
4986     index($lreg);
4987     scale($scale);
4988     disp($off);
4989   %}
4990 %}
4991 
4992 operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
4993 %{
4994   predicate(Universe::narrow_oop_shift() == 0);
4995   constraint(ALLOC_IN_RC(ptr_reg));
4996   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
4997   op_cost(INSN_COST);
4998   format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
4999   interface(MEMORY_INTER) %{
5000     base($reg);
5001     index($lreg);
5002     scale($scale);
5003     disp($off);
5004   %}
5005 %}
5006 
5007 operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
5008 %{
5009   predicate(Universe::narrow_oop_shift() == 0);
5010   constraint(ALLOC_IN_RC(ptr_reg));
5011   match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
5012   op_cost(INSN_COST);
5013   format %{ "$reg, $ireg, $off I2L\t# narrow" %}
5014   interface(MEMORY_INTER) %{
5015     base($reg);
5016     index($ireg);
5017     scale(0x0);
5018     disp($off);
5019   %}
5020 %}
5021 
5022 operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
5023 %{
5024   predicate(Universe::narrow_oop_shift() == 0);
5025   constraint(ALLOC_IN_RC(ptr_reg));
5026   match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
5027   op_cost(INSN_COST);
5028   format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
5029   interface(MEMORY_INTER) %{
5030     base($reg);
5031     index($ireg);
5032     scale($scale);
5033     disp($off);
5034   %}
5035 %}
5036 
5037 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
5038 %{
5039   predicate(Universe::narrow_oop_shift() == 0);
5040   constraint(ALLOC_IN_RC(ptr_reg));
5041   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
5042   op_cost(0);
5043   format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
5044   interface(MEMORY_INTER) %{
5045     base($reg);
5046     index($ireg);
5047     scale($scale);
5048     disp(0x0);
5049   %}
5050 %}
5051 
5052 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
5053 %{
5054   predicate(Universe::narrow_oop_shift() == 0);
5055   constraint(ALLOC_IN_RC(ptr_reg));
5056   match(AddP (DecodeN reg) (LShiftL lreg scale));
5057   op_cost(0);
5058   format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
5059   interface(MEMORY_INTER) %{
5060     base($reg);
5061     index($lreg);
5062     scale($scale);
5063     disp(0x0);
5064   %}
5065 %}
5066 
5067 operand indIndexN(iRegN reg, iRegL lreg)
5068 %{
5069   predicate(Universe::narrow_oop_shift() == 0);
5070   constraint(ALLOC_IN_RC(ptr_reg));
5071   match(AddP (DecodeN reg) lreg);
5072   op_cost(0);
5073   format %{ "$reg, $lreg\t# narrow" %}
5074   interface(MEMORY_INTER) %{
5075     base($reg);
5076     index($lreg);
5077     scale(0x0);
5078     disp(0x0);
5079   %}
5080 %}
5081 
5082 operand indOffIN(iRegN reg, immIOffset off)
5083 %{
5084   predicate(Universe::narrow_oop_shift() == 0);
5085   constraint(ALLOC_IN_RC(ptr_reg));
5086   match(AddP (DecodeN reg) off);
5087   op_cost(0);
5088   format %{ "[$reg, $off]\t# narrow" %}
5089   interface(MEMORY_INTER) %{
5090     base($reg);
5091     index(0xffffffff);
5092     scale(0x0);
5093     disp($off);
5094   %}
5095 %}
5096 
5097 operand indOffLN(iRegN reg, immLoffset off)
5098 %{
5099   predicate(Universe::narrow_oop_shift() == 0);
5100   constraint(ALLOC_IN_RC(ptr_reg));
5101   match(AddP (DecodeN reg) off);
5102   op_cost(0);
5103   format %{ "[$reg, $off]\t# narrow" %}
5104   interface(MEMORY_INTER) %{
5105     base($reg);
5106     index(0xffffffff);
5107     scale(0x0);
5108     disp($off);
5109   %}
5110 %}
5111 
5112 
5113 
5114 // AArch64 opto stubs need to write to the pc slot in the thread anchor
5115 operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
5116 %{
5117   constraint(ALLOC_IN_RC(ptr_reg));
5118   match(AddP reg off);
5119   op_cost(0);
5120   format %{ "[$reg, $off]" %}
5121   interface(MEMORY_INTER) %{
5122     base($reg);
5123     index(0xffffffff);
5124     scale(0x0);
5125     disp($off);
5126   %}
5127 %}
5128 
5129 //----------Special Memory Operands--------------------------------------------
5130 // Stack Slot Operand - This operand is used for loading and storing temporary
5131 //                      values on the stack where a match requires a value to
5132 //                      flow through memory.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // SP — stack pointer ("RSP" in the x86 file this was derived from)
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// NOTE(review): unlike stackSlotP, the stack-slot operands below specify no
// op_cost and fall back on the ADLC default — confirm the asymmetry is intended.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // SP — stack pointer
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // SP — stack pointer
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // SP — stack pointer
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // SP — stack pointer
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5203 
5204 // Operands for expressing Control Flow
5205 // NOTE: Label is a predefined operand which should not be redefined in
5206 //       the AD file. It is generically handled within the ADLC.
5207 
5208 //----------Conditional Branch Operands----------------------------------------
5209 // Comparison Op  - This is the operation of the comparison, and is limited to
5210 //                  the following set of codes:
5211 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5212 //
5213 // Other attributes of the comparison, such as unsignedness, are specified
5214 // by the comparison instruction that sets a condition code flags register.
5215 // That result is represented by a flags operand whose subtype is appropriate
5216 // to the unsignedness (etc.) of the comparison.
5217 //
5218 // Later, the instruction which matches both the Comparison Op (a Bool) and
5219 // the flags (produced by the Cmp) specifies the coding of the comparison op
5220 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5221 
// used for signed integral comparisons and fp comparisons

operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  // The numeric values are the AArch64 condition-code encodings emitted
  // for each ideal comparison result.
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // Same as cmpOp, but the ordered tests map to the unsigned condition
  // codes (lo/hs/ls/hi) rather than the signed ones (lt/ge/le/gt).
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
5259 
5260 // Special operand allowing long args to int ops to be truncated for free
5261 
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  // Match the ConvL2I directly so the consuming 32-bit instruction can
  // simply read the low half of the long register, eliding the l2i/movw.
  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  // Terminating ';' added for consistency with every other operand's
  // interface declaration in this file.
  interface(REG_INTER);
%}
5272 
5273 opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
5274 opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
5275 opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
5276 
5277 //----------OPERAND CLASSES----------------------------------------------------
5278 // Operand Classes are groups of operands that are used as to simplify
5279 // instruction definitions by not requiring the AD writer to specify
5280 // separate instructions for every form of operand when the
5281 // instruction accepts multiple operand types with the same basic
5282 // encoding and format. The classic case of this is memory operands.
5283 
5284 // memory is used to define read/write location for load/store
5285 // instruction defs. we can turn a memory op into an Address
5286 
5287 opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
5288                indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
5289  
5293 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
5294 // operations. it allows the src to be either an iRegI or a (ConvL2I
5295 // iRegL). in the latter case the l2i normally planted for a ConvL2I
5296 // can be elided because the 32-bit instruction will just employ the
5297 // lower 32 bits anyway.
5298 //
5299 // n.b. this does not elide all L2I conversions. if the truncated
5300 // value is consumed by more than one operation then the ConvL2I
5301 // cannot be bundled into the consuming nodes so an l2i gets planted
5302 // (actually a movw $dst $src) and the downstream instructions consume
5303 // the result of the l2i as an iRegI input. That's a shame since the
5304 // movw is actually redundant but its not too costly.
5305 
5306 opclass iRegIorL2I(iRegI, iRegL2I);
5307 
5308 //----------PIPELINE-----------------------------------------------------------
5309 // Rules which define the behavior of the target architectures pipeline.
5310 
5311 // For specific pipelines, eg A53, define the stages of that pipeline
5312 //pipe_desc(ISS, EX1, EX2, WR);
5313 #define ISS S0
5314 #define EX1 S1
5315 #define EX2 S2
5316 #define WR  S3
5317 
5318 // Integer ALU reg operation
5319 pipeline %{
5320 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // Fixed size instructions
  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
5333 
5334 // We don't use an actual pipeline model so don't care about resources
5335 // or description. we do use pipeline classes to introduce fixed
5336 // latencies
5337 
5338 //----------RESOURCES----------------------------------------------------------
5339 // Resources are the functional units available to the machine
5340 
5341 resources( INS0, INS1, INS01 = INS0 | INS1,
5342            ALU0, ALU1, ALU = ALU0 | ALU1,
5343            MAC,
5344            DIV,
5345            BRANCH,
5346            LDST,
5347            NEON_FP);
5348 
5349 //----------PIPELINE DESCRIPTION-----------------------------------------------
5350 // Pipeline Description specifies the stages in the machine's pipeline
5351 
5352 // Define the pipeline as a generic 6 stage pipeline
5353 pipe_desc(S0, S1, S2, S3, S4, S5);
5354 
5355 //----------PIPELINE CLASSES---------------------------------------------------
5356 // Pipeline Classes describe the stages in which input and output are
5357 // referenced by the hardware pipeline.
5358 
5359 pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
5360 %{
5361   single_instruction;
5362   src1   : S1(read);
5363   src2   : S2(read);
5364   dst    : S5(write);
5365   INS01  : ISS;
5366   NEON_FP : S5;
5367 %}
5368 
5369 pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
5370 %{
5371   single_instruction;
5372   src1   : S1(read);
5373   src2   : S2(read);
5374   dst    : S5(write);
5375   INS01  : ISS;
5376   NEON_FP : S5;
5377 %}
5378 
5379 pipe_class fp_uop_s(vRegF dst, vRegF src)
5380 %{
5381   single_instruction;
5382   src    : S1(read);
5383   dst    : S5(write);
5384   INS01  : ISS;
5385   NEON_FP : S5;
5386 %}
5387 
5388 pipe_class fp_uop_d(vRegD dst, vRegD src)
5389 %{
5390   single_instruction;
5391   src    : S1(read);
5392   dst    : S5(write);
5393   INS01  : ISS;
5394   NEON_FP : S5;
5395 %}
5396 
5397 pipe_class fp_d2f(vRegF dst, vRegD src)
5398 %{
5399   single_instruction;
5400   src    : S1(read);
5401   dst    : S5(write);
5402   INS01  : ISS;
5403   NEON_FP : S5;
5404 %}
5405 
5406 pipe_class fp_f2d(vRegD dst, vRegF src)
5407 %{
5408   single_instruction;
5409   src    : S1(read);
5410   dst    : S5(write);
5411   INS01  : ISS;
5412   NEON_FP : S5;
5413 %}
5414 
5415 pipe_class fp_f2i(iRegINoSp dst, vRegF src)
5416 %{
5417   single_instruction;
5418   src    : S1(read);
5419   dst    : S5(write);
5420   INS01  : ISS;
5421   NEON_FP : S5;
5422 %}
5423 
5424 pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
5425 %{
5426   single_instruction;
5427   src    : S1(read);
5428   dst    : S5(write);
5429   INS01  : ISS;
5430   NEON_FP : S5;
5431 %}
5432 
5433 pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
5434 %{
5435   single_instruction;
5436   src    : S1(read);
5437   dst    : S5(write);
5438   INS01  : ISS;
5439   NEON_FP : S5;
5440 %}
5441 
5442 pipe_class fp_l2f(vRegF dst, iRegL src)
5443 %{
5444   single_instruction;
5445   src    : S1(read);
5446   dst    : S5(write);
5447   INS01  : ISS;
5448   NEON_FP : S5;
5449 %}
5450 
5451 pipe_class fp_d2i(iRegINoSp dst, vRegD src)
5452 %{
5453   single_instruction;
5454   src    : S1(read);
5455   dst    : S5(write);
5456   INS01  : ISS;
5457   NEON_FP : S5;
5458 %}
5459 
5460 pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
5461 %{
5462   single_instruction;
5463   src    : S1(read);
5464   dst    : S5(write);
5465   INS01  : ISS;
5466   NEON_FP : S5;
5467 %}
5468 
5469 pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
5470 %{
5471   single_instruction;
5472   src    : S1(read);
5473   dst    : S5(write);
5474   INS01  : ISS;
5475   NEON_FP : S5;
5476 %}
5477 
5478 pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
5479 %{
5480   single_instruction;
5481   src    : S1(read);
5482   dst    : S5(write);
5483   INS01  : ISS;
5484   NEON_FP : S5;
5485 %}
5486 
5487 pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
5488 %{
5489   single_instruction;
5490   src1   : S1(read);
5491   src2   : S2(read);
5492   dst    : S5(write);
5493   INS0   : ISS;
5494   NEON_FP : S5;
5495 %}
5496 
5497 pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
5498 %{
5499   single_instruction;
5500   src1   : S1(read);
5501   src2   : S2(read);
5502   dst    : S5(write);
5503   INS0   : ISS;
5504   NEON_FP : S5;
5505 %}
5506 
5507 pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
5508 %{
5509   single_instruction;
5510   cr     : S1(read);
5511   src1   : S1(read);
5512   src2   : S1(read);
5513   dst    : S3(write);
5514   INS01  : ISS;
5515   NEON_FP : S3;
5516 %}
5517 
5518 pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
5519 %{
5520   single_instruction;
5521   cr     : S1(read);
5522   src1   : S1(read);
5523   src2   : S1(read);
5524   dst    : S3(write);
5525   INS01  : ISS;
5526   NEON_FP : S3;
5527 %}
5528 
5529 pipe_class fp_imm_s(vRegF dst)
5530 %{
5531   single_instruction;
5532   dst    : S3(write);
5533   INS01  : ISS;
5534   NEON_FP : S3;
5535 %}
5536 
5537 pipe_class fp_imm_d(vRegD dst)
5538 %{
5539   single_instruction;
5540   dst    : S3(write);
5541   INS01  : ISS;
5542   NEON_FP : S3;
5543 %}
5544 
5545 pipe_class fp_load_constant_s(vRegF dst)
5546 %{
5547   single_instruction;
5548   dst    : S4(write);
5549   INS01  : ISS;
5550   NEON_FP : S4;
5551 %}
5552 
5553 pipe_class fp_load_constant_d(vRegD dst)
5554 %{
5555   single_instruction;
5556   dst    : S4(write);
5557   INS01  : ISS;
5558   NEON_FP : S4;
5559 %}
5560 
5561 pipe_class vmul64(vecD dst, vecD src1, vecD src2)
5562 %{
5563   single_instruction;
5564   dst    : S5(write);
5565   src1   : S1(read);
5566   src2   : S1(read);
5567   INS01  : ISS;
5568   NEON_FP : S5;
5569 %}
5570 
5571 pipe_class vmul128(vecX dst, vecX src1, vecX src2)
5572 %{
5573   single_instruction;
5574   dst    : S5(write);
5575   src1   : S1(read);
5576   src2   : S1(read);
5577   INS0   : ISS;
5578   NEON_FP : S5;
5579 %}
5580 
5581 pipe_class vmla64(vecD dst, vecD src1, vecD src2)
5582 %{
5583   single_instruction;
5584   dst    : S5(write);
5585   src1   : S1(read);
5586   src2   : S1(read);
5587   dst    : S1(read);
5588   INS01  : ISS;
5589   NEON_FP : S5;
5590 %}
5591 
// Vector multiply-accumulate, 128-bit (Q-register form).
// As for vmla64: dst is listed twice because the accumulator input is
// read early (S1) while the result is written late (S5).
// 128-bit NEON ops can only dual-issue in slot 0 (INS0).
pipe_class vmla128(vecX dst, vecX src1, vecX src2)
%{
  single_instruction;
  dst    : S5(write);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S1(read);   // accumulator operand read in S1
  INS0   : ISS;
  NEON_FP : S5;
%}
5602 
5603 pipe_class vdop64(vecD dst, vecD src1, vecD src2)
5604 %{
5605   single_instruction;
5606   dst    : S4(write);
5607   src1   : S2(read);
5608   src2   : S2(read);
5609   INS01  : ISS;
5610   NEON_FP : S4;
5611 %}
5612 
5613 pipe_class vdop128(vecX dst, vecX src1, vecX src2)
5614 %{
5615   single_instruction;
5616   dst    : S4(write);
5617   src1   : S2(read);
5618   src2   : S2(read);
5619   INS0   : ISS;
5620   NEON_FP : S4;
5621 %}
5622 
5623 pipe_class vlogical64(vecD dst, vecD src1, vecD src2)
5624 %{
5625   single_instruction;
5626   dst    : S3(write);
5627   src1   : S2(read);
5628   src2   : S2(read);
5629   INS01  : ISS;
5630   NEON_FP : S3;
5631 %}
5632 
5633 pipe_class vlogical128(vecX dst, vecX src1, vecX src2)
5634 %{
5635   single_instruction;
5636   dst    : S3(write);
5637   src1   : S2(read);
5638   src2   : S2(read);
5639   INS0   : ISS;
5640   NEON_FP : S3;
5641 %}
5642 
5643 pipe_class vshift64(vecD dst, vecD src, vecX shift)
5644 %{
5645   single_instruction;
5646   dst    : S3(write);
5647   src    : S1(read);
5648   shift  : S1(read);
5649   INS01  : ISS;
5650   NEON_FP : S3;
5651 %}
5652 
5653 pipe_class vshift128(vecX dst, vecX src, vecX shift)
5654 %{
5655   single_instruction;
5656   dst    : S3(write);
5657   src    : S1(read);
5658   shift  : S1(read);
5659   INS0   : ISS;
5660   NEON_FP : S3;
5661 %}
5662 
5663 pipe_class vshift64_imm(vecD dst, vecD src, immI shift)
5664 %{
5665   single_instruction;
5666   dst    : S3(write);
5667   src    : S1(read);
5668   INS01  : ISS;
5669   NEON_FP : S3;
5670 %}
5671 
5672 pipe_class vshift128_imm(vecX dst, vecX src, immI shift)
5673 %{
5674   single_instruction;
5675   dst    : S3(write);
5676   src    : S1(read);
5677   INS0   : ISS;
5678   NEON_FP : S3;
5679 %}
5680 
5681 pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2)
5682 %{
5683   single_instruction;
5684   dst    : S5(write);
5685   src1   : S1(read);
5686   src2   : S1(read);
5687   INS01  : ISS;
5688   NEON_FP : S5;
5689 %}
5690 
5691 pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2)
5692 %{
5693   single_instruction;
5694   dst    : S5(write);
5695   src1   : S1(read);
5696   src2   : S1(read);
5697   INS0   : ISS;
5698   NEON_FP : S5;
5699 %}
5700 
5701 pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2)
5702 %{
5703   single_instruction;
5704   dst    : S5(write);
5705   src1   : S1(read);
5706   src2   : S1(read);
5707   INS0   : ISS;
5708   NEON_FP : S5;
5709 %}
5710 
5711 pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2)
5712 %{
5713   single_instruction;
5714   dst    : S5(write);
5715   src1   : S1(read);
5716   src2   : S1(read);
5717   INS0   : ISS;
5718   NEON_FP : S5;
5719 %}
5720 
5721 pipe_class vsqrt_fp128(vecX dst, vecX src)
5722 %{
5723   single_instruction;
5724   dst    : S5(write);
5725   src    : S1(read);
5726   INS0   : ISS;
5727   NEON_FP : S5;
5728 %}
5729 
5730 pipe_class vunop_fp64(vecD dst, vecD src)
5731 %{
5732   single_instruction;
5733   dst    : S5(write);
5734   src    : S1(read);
5735   INS01  : ISS;
5736   NEON_FP : S5;
5737 %}
5738 
5739 pipe_class vunop_fp128(vecX dst, vecX src)
5740 %{
5741   single_instruction;
5742   dst    : S5(write);
5743   src    : S1(read);
5744   INS0   : ISS;
5745   NEON_FP : S5;
5746 %}
5747 
5748 pipe_class vdup_reg_reg64(vecD dst, iRegI src)
5749 %{
5750   single_instruction;
5751   dst    : S3(write);
5752   src    : S1(read);
5753   INS01  : ISS;
5754   NEON_FP : S3;
5755 %}
5756 
5757 pipe_class vdup_reg_reg128(vecX dst, iRegI src)
5758 %{
5759   single_instruction;
5760   dst    : S3(write);
5761   src    : S1(read);
5762   INS01  : ISS;
5763   NEON_FP : S3;
5764 %}
5765 
5766 pipe_class vdup_reg_freg64(vecD dst, vRegF src)
5767 %{
5768   single_instruction;
5769   dst    : S3(write);
5770   src    : S1(read);
5771   INS01  : ISS;
5772   NEON_FP : S3;
5773 %}
5774 
5775 pipe_class vdup_reg_freg128(vecX dst, vRegF src)
5776 %{
5777   single_instruction;
5778   dst    : S3(write);
5779   src    : S1(read);
5780   INS01  : ISS;
5781   NEON_FP : S3;
5782 %}
5783 
5784 pipe_class vdup_reg_dreg128(vecX dst, vRegD src)
5785 %{
5786   single_instruction;
5787   dst    : S3(write);
5788   src    : S1(read);
5789   INS01  : ISS;
5790   NEON_FP : S3;
5791 %}
5792 
5793 pipe_class vmovi_reg_imm64(vecD dst)
5794 %{
5795   single_instruction;
5796   dst    : S3(write);
5797   INS01  : ISS;
5798   NEON_FP : S3;
5799 %}
5800 
5801 pipe_class vmovi_reg_imm128(vecX dst)
5802 %{
5803   single_instruction;
5804   dst    : S3(write);
5805   INS0   : ISS;
5806   NEON_FP : S3;
5807 %}
5808 
5809 pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
5810 %{
5811   single_instruction;
5812   dst    : S5(write);
5813   mem    : ISS(read);
5814   INS01  : ISS;
5815   NEON_FP : S3;
5816 %}
5817 
5818 pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
5819 %{
5820   single_instruction;
5821   dst    : S5(write);
5822   mem    : ISS(read);
5823   INS01  : ISS;
5824   NEON_FP : S3;
5825 %}
5826 
5827 pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
5828 %{
5829   single_instruction;
5830   mem    : ISS(read);
5831   src    : S2(read);
5832   INS01  : ISS;
5833   NEON_FP : S3;
5834 %}
5835 
// Vector store, 128-bit.
// Fixed source operand type: the 128-bit variant stores a vecX; the
// previous vecD was a copy-paste remnant from vstore_reg_mem64.
// (ADLC does not type-check pipe_class parameters against operands, so
// this is a consistency fix with no matcher-visible behavior change.)
pipe_class vstore_reg_mem128(vecX src, vmem16 mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : S2(read);
  INS01  : ISS;
  NEON_FP : S3;
%}
5844 
5845 //------- Integer ALU operations --------------------------
5846 
5847 // Integer ALU reg-reg operation
5848 // Operands needed in EX1, result generated in EX2
5849 // Eg.  ADD     x0, x1, x2
5850 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
5851 %{
5852   single_instruction;
5853   dst    : EX2(write);
5854   src1   : EX1(read);
5855   src2   : EX1(read);
5856   INS01  : ISS; // Dual issue as instruction 0 or 1
5857   ALU    : EX2;
5858 %}
5859 
5860 // Integer ALU reg-reg operation with constant shift
5861 // Shifted register must be available in LATE_ISS instead of EX1
5862 // Eg.  ADD     x0, x1, x2, LSL #2
5863 pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
5864 %{
5865   single_instruction;
5866   dst    : EX2(write);
5867   src1   : EX1(read);
5868   src2   : ISS(read);
5869   INS01  : ISS;
5870   ALU    : EX2;
5871 %}
5872 
5873 // Integer ALU reg operation with constant shift
5874 // Eg.  LSL     x0, x1, #shift
5875 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
5876 %{
5877   single_instruction;
5878   dst    : EX2(write);
5879   src1   : ISS(read);
5880   INS01  : ISS;
5881   ALU    : EX2;
5882 %}
5883 
5884 // Integer ALU reg-reg operation with variable shift
5885 // Both operands must be available in LATE_ISS instead of EX1
5886 // Result is available in EX1 instead of EX2
5887 // Eg.  LSLV    x0, x1, x2
5888 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
5889 %{
5890   single_instruction;
5891   dst    : EX1(write);
5892   src1   : ISS(read);
5893   src2   : ISS(read);
5894   INS01  : ISS;
5895   ALU    : EX1;
5896 %}
5897 
5898 // Integer ALU reg-reg operation with extract
5899 // As for _vshift above, but result generated in EX2
5900 // Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is declared written in EX2 (matching the "result
// generated in EX2" comment above), but the ALU resource is only booked
// through EX1 -- looks inconsistent; confirm against the Cortex-A53
// pipeline model whether ALU should read "EX2" here.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}
5910 
5911 // Integer ALU reg operation
5912 // Eg.  NEG     x0, x1
5913 pipe_class ialu_reg(iRegI dst, iRegI src)
5914 %{
5915   single_instruction;
5916   dst    : EX2(write);
5917   src    : EX1(read);
5918   INS01  : ISS;
5919   ALU    : EX2;
5920 %}
5921 
// Integer ALU reg immediate operation
5923 // Eg.  ADD     x0, x1, #N
5924 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
5925 %{
5926   single_instruction;
5927   dst    : EX2(write);
5928   src1   : EX1(read);
5929   INS01  : ISS;
5930   ALU    : EX2;
5931 %}
5932 
5933 // Integer ALU immediate operation (no source operands)
5934 // Eg.  MOV     x0, #N
5935 pipe_class ialu_imm(iRegI dst)
5936 %{
5937   single_instruction;
5938   dst    : EX1(write);
5939   INS01  : ISS;
5940   ALU    : EX1;
5941 %}
5942 
5943 //------- Compare operation -------------------------------
5944 
5945 // Compare reg-reg
5946 // Eg.  CMP     x0, x1
5947 pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
5948 %{
5949   single_instruction;
5950 //  fixed_latency(16);
5951   cr     : EX2(write);
5952   op1    : EX1(read);
5953   op2    : EX1(read);
5954   INS01  : ISS;
5955   ALU    : EX2;
5956 %}
5957 
// Compare reg-imm
5959 // Eg.  CMP     x0, #N
5960 pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
5961 %{
5962   single_instruction;
5963 //  fixed_latency(16);
5964   cr     : EX2(write);
5965   op1    : EX1(read);
5966   INS01  : ISS;
5967   ALU    : EX2;
5968 %}
5969 
5970 //------- Conditional instructions ------------------------
5971 
5972 // Conditional no operands
5973 // Eg.  CSINC   x0, zr, zr, <cond>
5974 pipe_class icond_none(iRegI dst, rFlagsReg cr)
5975 %{
5976   single_instruction;
5977   cr     : EX1(read);
5978   dst    : EX2(write);
5979   INS01  : ISS;
5980   ALU    : EX2;
5981 %}
5982 
5983 // Conditional 2 operand
5984 // EG.  CSEL    X0, X1, X2, <cond>
5985 pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
5986 %{
5987   single_instruction;
5988   cr     : EX1(read);
5989   src1   : EX1(read);
5990   src2   : EX1(read);
5991   dst    : EX2(write);
5992   INS01  : ISS;
5993   ALU    : EX2;
5994 %}
5995 
5996 // Conditional 2 operand
5997 // EG.  CSEL    X0, X1, X2, <cond>
5998 pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
5999 %{
6000   single_instruction;
6001   cr     : EX1(read);
6002   src    : EX1(read);
6003   dst    : EX2(write);
6004   INS01  : ISS;
6005   ALU    : EX2;
6006 %}
6007 
6008 //------- Multiply pipeline operations --------------------
6009 
6010 // Multiply reg-reg
6011 // Eg.  MUL     w0, w1, w2
6012 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6013 %{
6014   single_instruction;
6015   dst    : WR(write);
6016   src1   : ISS(read);
6017   src2   : ISS(read);
6018   INS01  : ISS;
6019   MAC    : WR;
6020 %}
6021 
6022 // Multiply accumulate
6023 // Eg.  MADD    w0, w1, w2, w3
6024 pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6025 %{
6026   single_instruction;
6027   dst    : WR(write);
6028   src1   : ISS(read);
6029   src2   : ISS(read);
6030   src3   : ISS(read);
6031   INS01  : ISS;
6032   MAC    : WR;
6033 %}
6034 
// Long multiply reg-reg
// Eg.  MUL     x0, x1, x2
6036 pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6037 %{
6038   single_instruction;
6039   fixed_latency(3); // Maximum latency for 64 bit mul
6040   dst    : WR(write);
6041   src1   : ISS(read);
6042   src2   : ISS(read);
6043   INS01  : ISS;
6044   MAC    : WR;
6045 %}
6046 
// Long multiply accumulate
// Eg.  MADD    x0, x1, x2, x3
6049 pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
6050 %{
6051   single_instruction;
6052   fixed_latency(3); // Maximum latency for 64 bit mul
6053   dst    : WR(write);
6054   src1   : ISS(read);
6055   src2   : ISS(read);
6056   src3   : ISS(read);
6057   INS01  : ISS;
6058   MAC    : WR;
6059 %}
6060 
6061 //------- Divide pipeline operations --------------------
6062 
6063 // Eg.  SDIV    w0, w1, w2
6064 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6065 %{
6066   single_instruction;
6067   fixed_latency(8); // Maximum latency for 32 bit divide
6068   dst    : WR(write);
6069   src1   : ISS(read);
6070   src2   : ISS(read);
6071   INS0   : ISS; // Can only dual issue as instruction 0
6072   DIV    : WR;
6073 %}
6074 
6075 // Eg.  SDIV    x0, x1, x2
6076 pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
6077 %{
6078   single_instruction;
6079   fixed_latency(16); // Maximum latency for 64 bit divide
6080   dst    : WR(write);
6081   src1   : ISS(read);
6082   src2   : ISS(read);
6083   INS0   : ISS; // Can only dual issue as instruction 0
6084   DIV    : WR;
6085 %}
6086 
6087 //------- Load pipeline operations ------------------------
6088 
6089 // Load - prefetch
6090 // Eg.  PFRM    <mem>
6091 pipe_class iload_prefetch(memory mem)
6092 %{
6093   single_instruction;
6094   mem    : ISS(read);
6095   INS01  : ISS;
6096   LDST   : WR;
6097 %}
6098 
6099 // Load - reg, mem
6100 // Eg.  LDR     x0, <mem>
6101 pipe_class iload_reg_mem(iRegI dst, memory mem)
6102 %{
6103   single_instruction;
6104   dst    : WR(write);
6105   mem    : ISS(read);
6106   INS01  : ISS;
6107   LDST   : WR;
6108 %}
6109 
6110 // Load - reg, reg
6111 // Eg.  LDR     x0, [sp, x1]
6112 pipe_class iload_reg_reg(iRegI dst, iRegI src)
6113 %{
6114   single_instruction;
6115   dst    : WR(write);
6116   src    : ISS(read);
6117   INS01  : ISS;
6118   LDST   : WR;
6119 %}
6120 
6121 //------- Store pipeline operations -----------------------
6122 
6123 // Store - zr, mem
6124 // Eg.  STR     zr, <mem>
6125 pipe_class istore_mem(memory mem)
6126 %{
6127   single_instruction;
6128   mem    : ISS(read);
6129   INS01  : ISS;
6130   LDST   : WR;
6131 %}
6132 
6133 // Store - reg, mem
6134 // Eg.  STR     x0, <mem>
6135 pipe_class istore_reg_mem(iRegI src, memory mem)
6136 %{
6137   single_instruction;
6138   mem    : ISS(read);
6139   src    : EX2(read);
6140   INS01  : ISS;
6141   LDST   : WR;
6142 %}
6143 
6144 // Store - reg, reg
6145 // Eg. STR      x0, [sp, x1]
6146 pipe_class istore_reg_reg(iRegI dst, iRegI src)
6147 %{
6148   single_instruction;
6149   dst    : ISS(read);
6150   src    : EX2(read);
6151   INS01  : ISS;
6152   LDST   : WR;
6153 %}
6154 
//------- Branch pipeline operations ----------------------
6156 
6157 // Branch
6158 pipe_class pipe_branch()
6159 %{
6160   single_instruction;
6161   INS01  : ISS;
6162   BRANCH : EX1;
6163 %}
6164 
6165 // Conditional branch
6166 pipe_class pipe_branch_cond(rFlagsReg cr)
6167 %{
6168   single_instruction;
6169   cr     : EX1(read);
6170   INS01  : ISS;
6171   BRANCH : EX1;
6172 %}
6173 
6174 // Compare & Branch
6175 // EG.  CBZ/CBNZ
6176 pipe_class pipe_cmp_branch(iRegI op1)
6177 %{
6178   single_instruction;
6179   op1    : EX1(read);
6180   INS01  : ISS;
6181   BRANCH : EX1;
6182 %}
6183 
6184 //------- Synchronisation operations ----------------------
6185 
6186 // Any operation requiring serialization.
6187 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
6188 pipe_class pipe_serial()
6189 %{
6190   single_instruction;
6191   force_serialization;
6192   fixed_latency(16);
6193   INS01  : ISS(2); // Cannot dual issue with any other instruction
6194   LDST   : WR;
6195 %}
6196 
6197 // Generic big/slow expanded idiom - also serialized
6198 pipe_class pipe_slow()
6199 %{
6200   instruction_count(10);
6201   multiple_bundles;
6202   force_serialization;
6203   fixed_latency(16);
6204   INS01  : ISS(2); // Cannot dual issue with any other instruction
6205   LDST   : WR;
6206 %}
6207 
6208 // Empty pipeline class
6209 pipe_class pipe_class_empty()
6210 %{
6211   single_instruction;
6212   fixed_latency(0);
6213 %}
6214 
6215 // Default pipeline class.
6216 pipe_class pipe_class_default()
6217 %{
6218   single_instruction;
6219   fixed_latency(2);
6220 %}
6221 
6222 // Pipeline class for compares.
6223 pipe_class pipe_class_compare()
6224 %{
6225   single_instruction;
6226   fixed_latency(16);
6227 %}
6228 
6229 // Pipeline class for memory operations.
6230 pipe_class pipe_class_memory()
6231 %{
6232   single_instruction;
6233   fixed_latency(16);
6234 %}
6235 
6236 // Pipeline class for call.
6237 pipe_class pipe_class_call()
6238 %{
6239   single_instruction;
6240   fixed_latency(100);
6241 %}
6242 
6243 // Define the class for the Nop node.
6244 define %{
6245    MachNop = pipe_class_empty;
6246 %}
6247 
6248 %}
6249 //----------INSTRUCTIONS-------------------------------------------------------
6250 //
6251 // match      -- States which machine-independent subtree may be replaced
6252 //               by this instruction.
6253 // ins_cost   -- The estimated cost of this instruction is used by instruction
6254 //               selection to identify a minimum cost tree of machine
6255 //               instructions that matches a tree of machine-independent
6256 //               instructions.
6257 // format     -- A string providing the disassembly for this instruction.
6258 //               The value of an instruction's operand may be inserted
6259 //               by referring to it with a '$' prefix.
6260 // opcode     -- Three instruction opcodes may be provided.  These are referred
6261 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
6263 //               indicate the type of machine instruction, while secondary
6264 //               and tertiary are often used for prefix options or addressing
6265 //               modes.
6266 // ins_encode -- A list of encode classes with parameters. The encode class
6267 //               name must have been defined in an 'enc_class' specification
6268 //               in the encode section of the architecture description.
6269 
6270 // ============================================================================
6271 // Memory (Load/Store) Instructions
6272 
6273 // Load Instructions
6274 
6275 // Load Byte (8 bit signed)
6276 instruct loadB(iRegINoSp dst, memory mem)
6277 %{
6278   match(Set dst (LoadB mem));
6279   predicate(!needs_acquiring_load(n));
6280 
6281   ins_cost(4 * INSN_COST);
6282   format %{ "ldrsbw  $dst, $mem\t# byte" %}
6283 
6284   ins_encode(aarch64_enc_ldrsbw(dst, mem));
6285 
6286   ins_pipe(iload_reg_mem);
6287 %}
6288 
6289 // Load Byte (8 bit signed) into long
6290 instruct loadB2L(iRegLNoSp dst, memory mem)
6291 %{
6292   match(Set dst (ConvI2L (LoadB mem)));
6293   predicate(!needs_acquiring_load(n->in(1)));
6294 
6295   ins_cost(4 * INSN_COST);
6296   format %{ "ldrsb  $dst, $mem\t# byte" %}
6297 
6298   ins_encode(aarch64_enc_ldrsb(dst, mem));
6299 
6300   ins_pipe(iload_reg_mem);
6301 %}
6302 
6303 // Load Byte (8 bit unsigned)
6304 instruct loadUB(iRegINoSp dst, memory mem)
6305 %{
6306   match(Set dst (LoadUB mem));
6307   predicate(!needs_acquiring_load(n));
6308 
6309   ins_cost(4 * INSN_COST);
6310   format %{ "ldrbw  $dst, $mem\t# byte" %}
6311 
6312   ins_encode(aarch64_enc_ldrb(dst, mem));
6313 
6314   ins_pipe(iload_reg_mem);
6315 %}
6316 
6317 // Load Byte (8 bit unsigned) into long
6318 instruct loadUB2L(iRegLNoSp dst, memory mem)
6319 %{
6320   match(Set dst (ConvI2L (LoadUB mem)));
6321   predicate(!needs_acquiring_load(n->in(1)));
6322 
6323   ins_cost(4 * INSN_COST);
6324   format %{ "ldrb  $dst, $mem\t# byte" %}
6325 
6326   ins_encode(aarch64_enc_ldrb(dst, mem));
6327 
6328   ins_pipe(iload_reg_mem);
6329 %}
6330 
6331 // Load Short (16 bit signed)
6332 instruct loadS(iRegINoSp dst, memory mem)
6333 %{
6334   match(Set dst (LoadS mem));
6335   predicate(!needs_acquiring_load(n));
6336 
6337   ins_cost(4 * INSN_COST);
6338   format %{ "ldrshw  $dst, $mem\t# short" %}
6339 
6340   ins_encode(aarch64_enc_ldrshw(dst, mem));
6341 
6342   ins_pipe(iload_reg_mem);
6343 %}
6344 
6345 // Load Short (16 bit signed) into long
6346 instruct loadS2L(iRegLNoSp dst, memory mem)
6347 %{
6348   match(Set dst (ConvI2L (LoadS mem)));
6349   predicate(!needs_acquiring_load(n->in(1)));
6350 
6351   ins_cost(4 * INSN_COST);
6352   format %{ "ldrsh  $dst, $mem\t# short" %}
6353 
6354   ins_encode(aarch64_enc_ldrsh(dst, mem));
6355 
6356   ins_pipe(iload_reg_mem);
6357 %}
6358 
6359 // Load Char (16 bit unsigned)
6360 instruct loadUS(iRegINoSp dst, memory mem)
6361 %{
6362   match(Set dst (LoadUS mem));
6363   predicate(!needs_acquiring_load(n));
6364 
6365   ins_cost(4 * INSN_COST);
6366   format %{ "ldrh  $dst, $mem\t# short" %}
6367 
6368   ins_encode(aarch64_enc_ldrh(dst, mem));
6369 
6370   ins_pipe(iload_reg_mem);
6371 %}
6372 
6373 // Load Short/Char (16 bit unsigned) into long
6374 instruct loadUS2L(iRegLNoSp dst, memory mem)
6375 %{
6376   match(Set dst (ConvI2L (LoadUS mem)));
6377   predicate(!needs_acquiring_load(n->in(1)));
6378 
6379   ins_cost(4 * INSN_COST);
6380   format %{ "ldrh  $dst, $mem\t# short" %}
6381 
6382   ins_encode(aarch64_enc_ldrh(dst, mem));
6383 
6384   ins_pipe(iload_reg_mem);
6385 %}
6386 
6387 // Load Integer (32 bit signed)
6388 instruct loadI(iRegINoSp dst, memory mem)
6389 %{
6390   match(Set dst (LoadI mem));
6391   predicate(!needs_acquiring_load(n));
6392 
6393   ins_cost(4 * INSN_COST);
6394   format %{ "ldrw  $dst, $mem\t# int" %}
6395 
6396   ins_encode(aarch64_enc_ldrw(dst, mem));
6397 
6398   ins_pipe(iload_reg_mem);
6399 %}
6400 
6401 // Load Integer (32 bit signed) into long
6402 instruct loadI2L(iRegLNoSp dst, memory mem)
6403 %{
6404   match(Set dst (ConvI2L (LoadI mem)));
6405   predicate(!needs_acquiring_load(n->in(1)));
6406 
6407   ins_cost(4 * INSN_COST);
6408   format %{ "ldrsw  $dst, $mem\t# int" %}
6409 
6410   ins_encode(aarch64_enc_ldrsw(dst, mem));
6411 
6412   ins_pipe(iload_reg_mem);
6413 %}
6414 
6415 // Load Integer (32 bit unsigned) into long
6416 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
6417 %{
6418   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
6419   predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
6420 
6421   ins_cost(4 * INSN_COST);
6422   format %{ "ldrw  $dst, $mem\t# int" %}
6423 
6424   ins_encode(aarch64_enc_ldrw(dst, mem));
6425 
6426   ins_pipe(iload_reg_mem);
6427 %}
6428 
6429 // Load Long (64 bit signed)
// Load Long (64 bit signed)
// Plain (non-acquiring) load; volatile/acquire loads are matched by a
// separate rule, hence the needs_acquiring_load predicate.
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Fixed disassembly annotation: 64-bit long load, not "# int".
  format %{ "ldr  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6442 
6443 // Load Range
6444 instruct loadRange(iRegINoSp dst, memory mem)
6445 %{
6446   match(Set dst (LoadRange mem));
6447 
6448   ins_cost(4 * INSN_COST);
6449   format %{ "ldrw  $dst, $mem\t# range" %}
6450 
6451   ins_encode(aarch64_enc_ldrw(dst, mem));
6452 
6453   ins_pipe(iload_reg_mem);
6454 %}
6455 
6456 // Load Pointer
6457 instruct loadP(iRegPNoSp dst, memory mem)
6458 %{
6459   match(Set dst (LoadP mem));
6460   predicate(!needs_acquiring_load(n));
6461 
6462   ins_cost(4 * INSN_COST);
6463   format %{ "ldr  $dst, $mem\t# ptr" %}
6464 
6465   ins_encode(aarch64_enc_ldr(dst, mem));
6466 
6467   ins_pipe(iload_reg_mem);
6468 %}
6469 
6470 // Load Compressed Pointer
6471 instruct loadN(iRegNNoSp dst, memory mem)
6472 %{
6473   match(Set dst (LoadN mem));
6474   predicate(!needs_acquiring_load(n));
6475 
6476   ins_cost(4 * INSN_COST);
6477   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
6478 
6479   ins_encode(aarch64_enc_ldrw(dst, mem));
6480 
6481   ins_pipe(iload_reg_mem);
6482 %}
6483 
6484 // Load Klass Pointer
6485 instruct loadKlass(iRegPNoSp dst, memory mem)
6486 %{
6487   match(Set dst (LoadKlass mem));
6488   predicate(!needs_acquiring_load(n));
6489 
6490   ins_cost(4 * INSN_COST);
6491   format %{ "ldr  $dst, $mem\t# class" %}
6492 
6493   ins_encode(aarch64_enc_ldr(dst, mem));
6494 
6495   ins_pipe(iload_reg_mem);
6496 %}
6497 
6498 // Load Narrow Klass Pointer
6499 instruct loadNKlass(iRegNNoSp dst, memory mem)
6500 %{
6501   match(Set dst (LoadNKlass mem));
6502   predicate(!needs_acquiring_load(n));
6503 
6504   ins_cost(4 * INSN_COST);
6505   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
6506 
6507   ins_encode(aarch64_enc_ldrw(dst, mem));
6508 
6509   ins_pipe(iload_reg_mem);
6510 %}
6511 
6512 // Load Float
6513 instruct loadF(vRegF dst, memory mem)
6514 %{
6515   match(Set dst (LoadF mem));
6516   predicate(!needs_acquiring_load(n));
6517 
6518   ins_cost(4 * INSN_COST);
6519   format %{ "ldrs  $dst, $mem\t# float" %}
6520 
6521   ins_encode( aarch64_enc_ldrs(dst, mem) );
6522 
6523   ins_pipe(pipe_class_memory);
6524 %}
6525 
6526 // Load Double
6527 instruct loadD(vRegD dst, memory mem)
6528 %{
6529   match(Set dst (LoadD mem));
6530   predicate(!needs_acquiring_load(n));
6531 
6532   ins_cost(4 * INSN_COST);
6533   format %{ "ldrd  $dst, $mem\t# double" %}
6534 
6535   ins_encode( aarch64_enc_ldrd(dst, mem) );
6536 
6537   ins_pipe(pipe_class_memory);
6538 %}
6539 
6540 
6541 // Load Int Constant
6542 instruct loadConI(iRegINoSp dst, immI src)
6543 %{
6544   match(Set dst src);
6545 
6546   ins_cost(INSN_COST);
6547   format %{ "mov $dst, $src\t# int" %}
6548 
6549   ins_encode( aarch64_enc_movw_imm(dst, src) );
6550 
6551   ins_pipe(ialu_imm);
6552 %}
6553 
6554 // Load Long Constant
6555 instruct loadConL(iRegLNoSp dst, immL src)
6556 %{
6557   match(Set dst src);
6558 
6559   ins_cost(INSN_COST);
6560   format %{ "mov $dst, $src\t# long" %}
6561 
6562   ins_encode( aarch64_enc_mov_imm(dst, src) );
6563 
6564   ins_pipe(ialu_imm);
6565 %}
6566 
6567 // Load Pointer Constant
6568 
6569 instruct loadConP(iRegPNoSp dst, immP con)
6570 %{
6571   match(Set dst con);
6572 
6573   ins_cost(INSN_COST * 4);
6574   format %{
6575     "mov  $dst, $con\t# ptr\n\t"
6576   %}
6577 
6578   ins_encode(aarch64_enc_mov_p(dst, con));
6579 
6580   ins_pipe(ialu_imm);
6581 %}
6582 
6583 // Load Null Pointer Constant
6584 
6585 instruct loadConP0(iRegPNoSp dst, immP0 con)
6586 %{
6587   match(Set dst con);
6588 
6589   ins_cost(INSN_COST);
6590   format %{ "mov  $dst, $con\t# NULL ptr" %}
6591 
6592   ins_encode(aarch64_enc_mov_p0(dst, con));
6593 
6594   ins_pipe(ialu_imm);
6595 %}
6596 
6597 // Load Pointer Constant One
6598 
// Materialize the pointer constant one (immP_1).
// Fixed disassembly annotation: this is constant 1, not the NULL
// pointer (that case is loadConP0 above; "# NULL ptr" was a paste-over).
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# ptr 1" %}

  ins_encode(aarch64_enc_mov_p1(dst, con));

  ins_pipe(ialu_imm);
%}
6610 
6611 // Load Poll Page Constant
6612 
6613 instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
6614 %{
6615   match(Set dst con);
6616 
6617   ins_cost(INSN_COST);
6618   format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
6619 
6620   ins_encode(aarch64_enc_mov_poll_page(dst, con));
6621 
6622   ins_pipe(ialu_imm);
6623 %}
6624 
6625 // Load Byte Map Base Constant
6626 
6627 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
6628 %{
6629   match(Set dst con);
6630 
6631   ins_cost(INSN_COST);
6632   format %{ "adr  $dst, $con\t# Byte Map Base" %}
6633 
6634   ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
6635 
6636   ins_pipe(ialu_imm);
6637 %}
6638 
6639 // Load Narrow Pointer Constant
6640 
6641 instruct loadConN(iRegNNoSp dst, immN con)
6642 %{
6643   match(Set dst con);
6644 
6645   ins_cost(INSN_COST * 4);
6646   format %{ "mov  $dst, $con\t# compressed ptr" %}
6647 
6648   ins_encode(aarch64_enc_mov_n(dst, con));
6649 
6650   ins_pipe(ialu_imm);
6651 %}
6652 
6653 // Load Narrow Null Pointer Constant
6654 
6655 instruct loadConN0(iRegNNoSp dst, immN0 con)
6656 %{
6657   match(Set dst con);
6658 
6659   ins_cost(INSN_COST);
6660   format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
6661 
6662   ins_encode(aarch64_enc_mov_n0(dst, con));
6663 
6664   ins_pipe(ialu_imm);
6665 %}
6666 
6667 // Load Narrow Klass Constant
6668 
6669 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
6670 %{
6671   match(Set dst con);
6672 
6673   ins_cost(INSN_COST);
6674   format %{ "mov  $dst, $con\t# compressed klass ptr" %}
6675 
6676   ins_encode(aarch64_enc_mov_nk(dst, con));
6677 
6678   ins_pipe(ialu_imm);
6679 %}
6680 
6681 // Load Packed Float Constant
6682 
6683 instruct loadConF_packed(vRegF dst, immFPacked con) %{
6684   match(Set dst con);
6685   ins_cost(INSN_COST * 4);
6686   format %{ "fmovs  $dst, $con"%}
6687   ins_encode %{
6688     __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
6689   %}
6690 
6691   ins_pipe(fp_imm_s);
6692 %}
6693 
6694 // Load Float Constant
6695 
6696 instruct loadConF(vRegF dst, immF con) %{
6697   match(Set dst con);
6698 
6699   ins_cost(INSN_COST * 4);
6700 
6701   format %{
6702     "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
6703   %}
6704 
6705   ins_encode %{
6706     __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
6707   %}
6708 
6709   ins_pipe(fp_load_constant_s);
6710 %}
6711 
6712 // Load Packed Double Constant
6713 
6714 instruct loadConD_packed(vRegD dst, immDPacked con) %{
6715   match(Set dst con);
6716   ins_cost(INSN_COST);
6717   format %{ "fmovd  $dst, $con"%}
6718   ins_encode %{
6719     __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
6720   %}
6721 
6722   ins_pipe(fp_imm_d);
6723 %}
6724 
6725 // Load Double Constant
6726 
// Load Double Constant from the constant table.
// (Doubles that fit the FMOV immediate encoding are handled by the
// cheaper loadConD_packed rule above.)
instruct loadConD(vRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 5);
  format %{
    // Fixed annotation: this loads a double, not a float.
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_d);
%}
6741 
6742 // Store Instructions
6743 
6744 // Store CMS card-mark Immediate
6745 instruct storeimmCM0(immI0 zero, memory mem)
6746 %{
6747   match(Set mem (StoreCM mem zero));
6748   predicate(unnecessary_storestore(n));
6749 
6750   ins_cost(INSN_COST);
6751   format %{ "strb zr, $mem\t# byte" %}
6752 
6753   ins_encode(aarch64_enc_strb0(mem));
6754 
6755   ins_pipe(istore_mem);
6756 %}
6757 
6758 // Store CMS card-mark Immediate with intervening StoreStore
6759 // needed when using CMS with no conditional card marking
6760 instruct storeimmCM0_ordered(immI0 zero, memory mem)
6761 %{
6762   match(Set mem (StoreCM mem zero));
6763 
6764   ins_cost(INSN_COST * 2);
6765   format %{ "dmb ishst"
6766       "\n\tstrb zr, $mem\t# byte" %}
6767 
6768   ins_encode(aarch64_enc_strb0_ordered(mem));
6769 
6770   ins_pipe(istore_mem);
6771 %}
6772 
6773 // Store Byte
6774 instruct storeB(iRegIorL2I src, memory mem)
6775 %{
6776   match(Set mem (StoreB mem src));
6777   predicate(!needs_releasing_store(n));
6778 
6779   ins_cost(INSN_COST);
6780   format %{ "strb  $src, $mem\t# byte" %}
6781 
6782   ins_encode(aarch64_enc_strb(src, mem));
6783 
6784   ins_pipe(istore_reg_mem);
6785 %}
6786 
6787 
6788 instruct storeimmB0(immI0 zero, memory mem)
6789 %{
6790   match(Set mem (StoreB mem zero));
6791   predicate(!needs_releasing_store(n));
6792 
6793   ins_cost(INSN_COST);
6794   format %{ "strb zr, $mem\t# byte" %}
6795 
6796   ins_encode(aarch64_enc_strb0(mem));
6797 
6798   ins_pipe(istore_mem);
6799 %}
6800 
6801 // Store Char/Short
6802 instruct storeC(iRegIorL2I src, memory mem)
6803 %{
6804   match(Set mem (StoreC mem src));
6805   predicate(!needs_releasing_store(n));
6806 
6807   ins_cost(INSN_COST);
6808   format %{ "strh  $src, $mem\t# short" %}
6809 
6810   ins_encode(aarch64_enc_strh(src, mem));
6811 
6812   ins_pipe(istore_reg_mem);
6813 %}
6814 
6815 instruct storeimmC0(immI0 zero, memory mem)
6816 %{
6817   match(Set mem (StoreC mem zero));
6818   predicate(!needs_releasing_store(n));
6819 
6820   ins_cost(INSN_COST);
6821   format %{ "strh  zr, $mem\t# short" %}
6822 
6823   ins_encode(aarch64_enc_strh0(mem));
6824 
6825   ins_pipe(istore_mem);
6826 %}
6827 
6828 // Store Integer
6829 
6830 instruct storeI(iRegIorL2I src, memory mem)
6831 %{
6832   match(Set mem(StoreI mem src));
6833   predicate(!needs_releasing_store(n));
6834 
6835   ins_cost(INSN_COST);
6836   format %{ "strw  $src, $mem\t# int" %}
6837 
6838   ins_encode(aarch64_enc_strw(src, mem));
6839 
6840   ins_pipe(istore_reg_mem);
6841 %}
6842 
// Store Integer constant zero: uses the zero register.
instruct storeimmI0(immI0 zero, memory mem)
%{
  match(Set mem(StoreI mem zero));
  // Plain (non-releasing) store only.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
6855 
6856 // Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));
  // Plain (non-releasing) store only; releasing stores match storeL_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Format comment corrected: this is a 64-bit long store, not an int store.
  format %{ "str  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
6869 
6870 // Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));
  // Plain (non-releasing) store only.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  // Format comment corrected: this is a 64-bit long store, not an int store.
  format %{ "str  zr, $mem\t# long" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
6883 
6884 // Store Pointer
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  // Plain (non-releasing) store only; releasing stores match storeP_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
6897 
6898 // Store Pointer
// Store null pointer: uses the zero register.
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  // Plain (non-releasing) store only.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
6911 
6912 // Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  match(Set mem (StoreN mem src));
  // Plain (non-releasing) store only; releasing stores match storeN_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
6925 
// Store compressed null: when both narrow-oop and narrow-klass bases are
// NULL, rheapbase holds zero, so it can be stored directly as the null value.
instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(Universe::narrow_oop_base() == NULL &&
            Universe::narrow_klass_base() == NULL  &&
            (!needs_releasing_store(n)));

  ins_cost(INSN_COST);
  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}

  ins_encode(aarch64_enc_strw(heapbase, mem));

  ins_pipe(istore_reg_mem);
%}
6940 
6941 // Store Float
instruct storeF(vRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));
  // Plain (non-releasing) store only; releasing stores match storeF_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
6954 
6955 // TODO
6956 // implement storeImmF0 and storeFImmPacked
6957 
6958 // Store Double
instruct storeD(vRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));
  // Plain (non-releasing) store only; releasing stores match storeD_volatile.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
6971 
6972 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  // Plain (non-releasing) store only.
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
6985 
6986 // TODO
6987 // implement storeImmD0 and storeDImmPacked
6988 
6989 // prefetch instructions
6990 // Must be safe to execute with invalid address (cannot fault).
6991 
// Prefetch for a subsequent read (PRFM PLDL1KEEP).
instruct prefetchr( memory mem ) %{
  match(PrefetchRead mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PLDL1KEEP\t# Prefetch into level 1 cache read keep" %}

  ins_encode( aarch64_enc_prefetchr(mem) );

  ins_pipe(iload_prefetch);
%}
7002 
// Prefetch ahead of allocation (PRFM PSTL1KEEP).
// n.b. despite the name this matches PrefetchAllocation, not PrefetchWrite.
instruct prefetchw( memory mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
7013 
// Streaming (non-temporal) prefetch for write (PRFM PSTL1STRM).
// n.b. despite the name this matches PrefetchWrite.
instruct prefetchnta( memory mem ) %{
  match(PrefetchWrite mem);

  ins_cost(INSN_COST);
  format %{ "prfm $mem, PSTL1STRM\t# Prefetch into level 1 cache write streaming" %}

  ins_encode( aarch64_enc_prefetchnta(mem) );

  ins_pipe(iload_prefetch);
%}
7024 
7025 //  ---------------- volatile loads and stores ----------------
7026 
7027 // Load Byte (8 bit signed)
// Acquiring load; the indirect operand restricts addressing to base-register
// form as required by ldar-family instructions.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
7039 
7040 // Load Byte (8 bit signed) into long
// Acquiring load of a byte widened to long; ldarsb sign-extends to 64 bits,
// so the ConvI2L is folded into the load.
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
7052 
7053 // Load Byte (8 bit unsigned)
// Acquiring load of an unsigned byte (zero-extending ldarb).
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
7065 
7066 // Load Byte (8 bit unsigned) into long
// Acquiring load of an unsigned byte widened to long; ldarb zero-extends,
// so the ConvI2L is folded into the load.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
7078 
7079 // Load Short (16 bit signed)
// Acquiring load of a signed short (sign-extending ldarshw).
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
7091 
// Acquiring load of an unsigned short/char (zero-extending ldarhw).
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
7103 
7104 // Load Short/Char (16 bit unsigned) into long
// Acquiring load of an unsigned short/char widened to long; ldarh
// zero-extends, so the ConvI2L is folded into the load.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
7116 
7117 // Load Short/Char (16 bit signed) into long
// Acquiring load of a signed short widened to long; ldarsh sign-extends to
// 64 bits, so the ConvI2L is folded into the load.
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(VOLATILE_REF_COST);
  // Format corrected to match the encoding: the emitted instruction is the
  // signed ldarsh, not the zero-extending ldarh.
  format %{ "ldarsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarsh(dst, mem));

  ins_pipe(pipe_serial);
%}
7129 
7130 // Load Integer (32 bit signed)
// Acquiring 32-bit load (ldarw).
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7142 
7143 // Load Integer (32 bit unsigned) into long
// Acquiring load of an int zero-extended to long; ldarw clears the upper
// 32 bits, so the AndL with the 32-bit mask is folded into the load.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7155 
7156 // Load Long (64 bit signed)
// Acquiring 64-bit load (ldar).
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // Format comment corrected: this is a 64-bit long load, not an int load.
  format %{ "ldar  $dst, $mem\t# long" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7168 
7169 // Load Pointer
// Acquiring 64-bit pointer load (ldar).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7181 
7182 // Load Compressed Pointer
// Acquiring 32-bit compressed-pointer load (ldarw).
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
7194 
7195 // Load Float
// Acquiring float load; the encoding goes via an integer acquiring load
// plus a move into the FP register (see aarch64_enc_fldars).
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
7207 
7208 // Load Double
// Acquiring double load; see aarch64_enc_fldard for the encoding.
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
7220 
7221 // Store Byte
// Releasing byte store (stlrb); indirect addressing only, as required by
// the stlr instruction family.
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
7233 
7234 // Store Char/Short
// Releasing char/short store (stlrh).
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
7246 
7247 // Store Integer
7248 
// Releasing 32-bit store (stlrw).
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7260 
7261 // Store Long (64 bit signed)
// Releasing 64-bit store (stlr).
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(VOLATILE_REF_COST);
  // Format comment corrected: this is a 64-bit long store, not an int store.
  format %{ "stlr  $src, $mem\t# long" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7273 
7274 // Store Pointer
// Releasing 64-bit pointer store (stlr).
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
7286 
7287 // Store Compressed Pointer
// Releasing 32-bit compressed-pointer store (stlrw).
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
7299 
7300 // Store Float
// Releasing float store; see aarch64_enc_fstlrs for the encoding.
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7312 
7313 // TODO
7314 // implement storeImmF0 and storeFImmPacked
7315 
7316 // Store Double
// Releasing double store; see aarch64_enc_fstlrd for the encoding.
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
7328 
7329 //  ---------------- end of volatile loads and stores ----------------
7330 
7331 // ============================================================================
7332 // BSWAP Instructions
7333 
// Reverse the bytes of a 32-bit value (revw).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7346 
// Reverse the bytes of a 64-bit value (rev).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7359 
// Reverse the bytes of an unsigned 16-bit value (rev16w); upper bits are
// already zero for a char, so no extension is needed.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7372 
// Reverse the bytes of a signed 16-bit value: rev16w swaps the low two
// bytes, then sbfmw sign-extends bits 0..15 into the full 32-bit result.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
7387 
7388 // ============================================================================
7389 // Zero Count Instructions
7390 
// Count leading zeros of a 32-bit value (clzw).
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7402 
// Count leading zeros of a 64-bit value (clz); result is an int.
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7414 
// Count trailing zeros of a 32-bit value: AArch64 has no ctz, so reverse
// the bits (rbitw) and count leading zeros (clzw).
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7428 
// Count trailing zeros of a 64-bit value via rbit + clz (no ctz on AArch64).
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
7442 
7443 //---------- Population Count Instructions -------------------------------------
7444 //
7445 
// Population count of an int, done in the vector unit: move to an FP/SIMD
// register, count bits per byte (cnt 8B), then sum the lanes (addv 8B).
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7467 
// Population count of an int loaded from memory: the load is folded into
// an ldrs directly into the FP/SIMD temp, then cnt/addv as in popCountI.
instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // Emit the load through the generic loadStore helper so all memory
    // operand addressing modes are handled.
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7489 
7490 // Note: Long.bitCount(long) returns an int.
// Population count of a long via the vector unit (cnt 8B + addv 8B);
// the result is an int, hence iRegINoSp dst.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7510 
// Population count of a long loaded from memory: the load is folded into
// an ldrd directly into the FP/SIMD temp, then cnt/addv as in popCountL.
instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    // Emit the load through the generic loadStore helper so all memory
    // operand addressing modes are handled.
    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
7532 
7533 // ============================================================================
7534 // MemBar Instruction
7535 
// Load fence: orders prior loads before subsequent loads and stores.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
7547 
// Elide an acquire membar when the preceding load already used ldar
// (see unnecessary_acquire); only a block comment is emitted.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
7561 
// Full acquire membar: LoadLoad|LoadStore barrier.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
7575 
7576 
// Acquire barrier after lock acquisition is elided: the CAS used to take
// the lock already provides the required ordering.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
7589 
// Store fence: orders prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
7601 
// Elide a release membar when the following store will use stlr
// (see unnecessary_release); only a block comment is emitted.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
7614 
// Full release membar: LoadStore|StoreStore barrier.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release" %}

  ins_encode %{
    __ block_comment("membar_release");
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
7627 
// StoreStore-only barrier.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
7639 
// Release barrier before lock release is elided: the unlock CAS provides
// the required ordering.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
7652 
// Elide a volatile (StoreLoad) membar when the surrounding ldar/stlr
// sequence already guarantees ordering (see unnecessary_volatile).
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
7666 
// Full volatile membar: StoreLoad barrier, the most expensive ordering.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile" %}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
7680 
7681 // ============================================================================
7682 // Cast/Convert Instructions
7683 
// Reinterpret a long as a pointer; the move is skipped when src and dst
// were allocated to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
7698 
// Reinterpret a pointer as a long; the move is skipped when src and dst
// were allocated to the same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
7713 
7714 // Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    // movw truncates to the low 32 bits of the pointer.
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7726 
7727 // Convert compressed oop into int for vectors alignment masking
7728 // in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // With a zero shift the compressed oop bits equal the low 32 address
  // bits, so the decode can be folded into a simple 32-bit move.
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  // Format corrected: operand reference needs a '$' and the emitted
  // instruction is the 32-bit movw, matching the encoding below.
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
7742 
7743 
7744 // Convert oop pointer into compressed form
// Encode a possibly-null oop; the null check inside encode_heap_oop sets
// flags, hence KILL cr.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7758 
// Encode an oop known to be non-null; no null check, so flags are
// untouched (no KILL cr effect despite the cr operand).
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
7769 
// Decode a possibly-null compressed oop (must preserve null).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7783 
// Decode a compressed oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
7797 
7798 // n.b. AArch64 implementations of encode_klass_not_null and
7799 // decode_klass_not_null do not modify the flags register so, unlike
7800 // Intel, we don't kill CR as a side effect here
7801 
// Encode a klass pointer; flags are not clobbered on AArch64 (see note
// above), so no cr operand or KILL effect is needed.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
7816 
// Decode a compressed klass pointer; uses the in-place single-register
// variant when src and dst were allocated to the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
7835 
// Type-only cast: emits no code (size 0); exists purely for the matcher.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
7845 
// Type-only pointer cast: emits no code.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
7855 
// Type-only int cast: emits no code and costs nothing.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
7866 
7867 // ============================================================================
7868 // Atomic operation instructions
7869 //
7870 // Intel and SPARC both implement Ideal Node LoadPLocked and
7871 // Store{PIL}Conditional instructions using a normal load for the
7872 // LoadPLocked and a CAS for the Store{PIL}Conditional.
7873 //
7874 // The ideal code appears only to use LoadPLocked/StorePLocked as a
7875 // pair to lock object allocations from Eden space when not using
7876 // TLABs.
7877 //
7878 // There does not appear to be a Load{IL}Locked Ideal Node and the
7879 // Ideal code appears to use Store{IL}Conditional as an alias for CAS
7880 // and to use StoreIConditional only for 32-bit and StoreLConditional
7881 // only for 64-bit.
7882 //
7883 // We implement LoadPLocked and StorePLocked instructions using,
7884 // respectively the AArch64 hw load-exclusive and store-conditional
7885 // instructions. Whereas we must implement each of
7886 // Store{IL}Conditional using a CAS which employs a pair of
7887 // instructions comprising a load-exclusive followed by a
7888 // store-conditional.
7889 
7890 
7891 // Locked-load (linked load) of the current heap-top
7892 // used when updating the eden heap top
7893 // implemented using ldaxr on AArch64
7894 
// Linked (exclusive, acquiring) pointer load via ldaxr; pairs with
// storePConditional's stlxr when bumping the heap top.
instruct loadPLocked(iRegPNoSp dst, indirect mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(VOLATILE_REF_COST);

  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}

  ins_encode(aarch64_enc_ldaxr(dst, mem));

  ins_pipe(pipe_serial);
%}
7907 
7908 // Conditional-store of the updated heap-top.
7909 // Used during allocation of the shared heap.
7910 // Sets flag (EQ) on success.
7911 // implemented using stlxr on AArch64.
7912 
// Store-conditional of the new heap top via stlxr; oldval is implicit in
// the preceding loadPLocked reservation. EQ is set iff the store succeeded.
instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr) 
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

 // TODO
 // do we need to do a store-conditional release or can we just use a
 // plain store-conditional?

  format %{
    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));

  ins_pipe(pipe_serial);
%}
7932 
7933 
7934 // storeLConditional is used by PhaseMacroExpand::expand_lock_node
7935 // when attempting to rebias a lock towards the current thread.  We
7936 // must use the acquire form of cmpxchg in order to guarantee acquire
7937 // semantics in this case.
// Conditional long store implemented as an acquiring CAS; sets EQ on
// success. Acquire semantics are required by the lock-rebias path.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) 
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
7953 
7954 // storeIConditional also has acquire semantics, for no better reason
7955 // than matching storeLConditional.  At the time of writing this
7956 // comment storeIConditional was not used anywhere by AArch64.
// Int conditional store, word-sized twin of storeLConditional above:
// acquire-form CAS, status in rscratch1, EQ flag as the result.
// NOTE(review): same suspect trailing ", $mem" in the format text as
// in storeLConditional — debug string only, no codegen effect.
instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));

  ins_cost(VOLATILE_REF_COST);

  format %{
    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
    "cmpw rscratch1, zr\t# EQ on successful write"
  %}

  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval));

  ins_pipe(pipe_slow);
%}
7972 
7973 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
7974 // can't match them
7975 
7976 // standard CompareAndSwapX when we are using barriers
7977 // these have higher priority than the rules selected by a predicate
7978 
// Int CAS with full barrier semantics. cmpxchgw leaves the condition
// flags set; cset then materializes EQ as a 0/1 boolean in $res.
// Flags are clobbered, hence the KILL cr effect.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
7996 
// Long (64-bit) CAS with full barrier semantics; result is a 0/1 int
// in $res via cset on EQ. Flags clobbered (KILL cr).
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8014 
// Pointer (64-bit) CAS with full barrier semantics; 0/1 result in $res.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8032 
// Narrow-oop (32-bit compressed pointer) CAS with full barrier
// semantics; word-sized cmpxchgw, 0/1 result in $res.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8050 
8051 
8052 // alternative CompareAndSwapX when we are eliding barriers
8053 
// Acquire variant of compareAndSwapI, selected when the matcher proves
// the CAS needs acquiring load-exclusive semantics. Half the cost of
// the barrier form above so this rule wins when its predicate holds.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8072 
// Acquire variant of compareAndSwapL (see compareAndSwapIAcq for the
// cost/predicate rationale).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8091 
// Acquire variant of compareAndSwapP.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8110 
// Acquire variant of compareAndSwapN (narrow oop, word-sized).
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8129 
8130 
// Atomic int exchange: swaps $newv into [$mem] and yields the value
// that was there before in $prev (GetAndSetI result).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8140 
// Atomic long exchange (64-bit atomic_xchg).
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8150 
// Atomic narrow-oop exchange (word-sized atomic_xchgw).
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8160 
// Atomic pointer exchange (64-bit atomic_xchg).
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8170 
// Acquire variant of get_and_setI: uses atomic_xchgalw (acquire/release
// exchange); lower cost so it is preferred when the predicate holds.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8181 
// Acquire variant of get_and_setL (atomic_xchgal).
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8192 
// Acquire variant of get_and_setN (atomic_xchgalw, word-sized).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8203 
// Acquire variant of get_and_setP (atomic_xchgal).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8214 
8215 
// Atomic long add of a register increment; the GetAndAddL node's result
// is produced in $newval (first operand of atomic_add).
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8225 
// Cheaper variant selected when the GetAndAddL result is unused:
// the fetched value is discarded into noreg.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8236 
// Immediate-increment variant: immLAddSub constrains $incr to a valid
// add/sub immediate, passed as a constant rather than a register.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8246 
// Immediate increment, result unused: discards the fetched value.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8257 
// Atomic int add (word-sized atomic_addw); result in $newval.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8267 
// Int add, result unused: discards the fetched value into noreg.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8278 
// Int add with an add/sub immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8288 
// Int add with immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8299 
// Acquire variant of get_and_addL: atomic_addal (acquire/release);
// lower cost so it is preferred when the predicate holds.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8310 
// Acquire variant of get_and_addL_no_res (result unused).
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8321 
// Acquire variant of get_and_addLi (immediate increment).
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8332 
// Acquire variant, immediate increment, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8343 
// Acquire variant of get_and_addI (atomic_addalw, word-sized).
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8354 
// Acquire variant of get_and_addI_no_res (result unused).
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8365 
// Acquire variant of get_and_addIi (immediate increment).
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8376 
// Acquire variant, immediate increment, int, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
8387 
8388 // ============================================================================
8389 // Conditional Move Instructions
8390 
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
8400 
// Int conditional move under a signed compare: csel picks $src2 when
// $cmp holds, otherwise $src1.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8416 
// Unsigned-compare flavour of cmovI_reg_reg (see the note above on why
// cmpOp and cmpOpU need separate rules).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8432 
8433 // special cases where one arg is zero
8434 
8435 // n.b. this is selected in preference to the rule above because it
8436 // avoids loading constant 0 into a source register
8437 
8438 // TODO
8439 // we ought only to be able to cull one of these variants as the ideal
8440 // transforms ought always to order the zero consistently (to left/right?)
8441 
// CMoveI with a zero first arm: the constant 0 is supplied by zr so no
// register needs to be loaded with it.
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8457 
// Unsigned-compare flavour of cmovI_zero_reg.
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8473 
// CMoveI with a zero second arm (zr in the csel true slot).
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8489 
// Unsigned-compare flavour of cmovI_reg_zero.
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8505 
8506 // special case for creating a boolean 0 or 1
8507 
8508 // n.b. this is selected in preference to the rule above because it
8509 // avoids loading constants 0 and 1 into a source register
8510 
// Boolean materialization: csincw zr, zr, cond yields (cond ? 0 : 1)
// using no source registers at all.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8529 
// Unsigned-compare flavour of cmovI_reg_zero_one.
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
8548 
// Long conditional move (64-bit csel), signed compare.
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8564 
// Unsigned-compare flavour of cmovL_reg_reg.
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8580 
8581 // special cases where one arg is zero
8582 
// CMoveL with a zero second arm, folded into zr.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8598 
// Unsigned-compare flavour of cmovL_reg_zero.
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8614 
// CMoveL with a zero first arm, folded into zr.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8630 
// Unsigned-compare flavour of cmovL_zero_reg.
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8646 
// Pointer conditional move (64-bit csel), signed compare.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8662 
// Unsigned-compare flavour of cmovP_reg_reg.
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8678 
8679 // special cases where one arg is zero
8680 
// CMoveP with a null (zero) second arm, folded into zr.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8696 
// Unsigned-compare flavour of cmovP_reg_zero.
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8712 
// CMoveP with a null (zero) first arm, folded into zr.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8728 
// Unsigned-compare flavour of cmovP_zero_reg.
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8744 
// Narrow-oop conditional move (word-sized cselw), signed compare.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8760 
// Unsigned-compare flavour of cmovN_reg_reg (word-sized cselw).
// Fix: the format text previously said "signed" although this rule
// matches cmpOpU/rFlagsRegU — it now says "unsigned", matching the
// convention of every other cmovU* rule in this file (debug/disassembly
// annotation only; no change to generated code).
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
8776 
8777 // special cases where one arg is zero
8778 
// CMoveN with a zero second arm, folded into zr.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8794 
// Unsigned-compare flavour of cmovN_reg_zero.
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8810 
// CMoveN with a zero first arm, folded into zr.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8826 
// Unsigned-compare flavour of cmovN_zero_reg.
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
8842 
// Float conditional move: fcsels selects $src2 when $cmp holds,
// otherwise $src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
8860 
// Conditional move between float registers, unsigned compare.
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Sources swapped, as in cmovF_reg.
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
8878 
8879 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
8880 %{
8881   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
8882 
8883   ins_cost(INSN_COST * 3);
8884 
8885   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
8886   ins_encode %{
8887     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
8888     __ fcseld(as_FloatRegister($dst$$reg),
8889               as_FloatRegister($src2$$reg),
8890               as_FloatRegister($src1$$reg),
8891               cond);
8892   %}
8893 
8894   ins_pipe(fp_cond_reg_reg_d);
8895 %}
8896 
8897 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
8898 %{
8899   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
8900 
8901   ins_cost(INSN_COST * 3);
8902 
8903   format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
8904   ins_encode %{
8905     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
8906     __ fcseld(as_FloatRegister($dst$$reg),
8907               as_FloatRegister($src2$$reg),
8908               as_FloatRegister($src1$$reg),
8909               cond);
8910   %}
8911 
8912   ins_pipe(fp_cond_reg_reg_d);
8913 %}
8914 
8915 // ============================================================================
8916 // Arithmetic Instructions
8917 //
8918 
8919 // Integer Addition
8920 
8921 // TODO
8922 // these currently employ operations which do not set CR and hence are
8923 // not flagged as killing CR but we would like to isolate the cases
8924 // where we want to set flags from those where we don't. need to work
8925 // out how to do that.
8926 
// 32-bit integer add, register + register: addw dst, src1, src2.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8941 
// 32-bit integer add of an add/sub-encodable immediate.
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8955 
// 32-bit immediate add where src1 is the low half of a long (ConvL2I is
// folded away: addw reads only the low 32 bits of the register anyway).
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
8969 
8970 // Pointer Addition
// Pointer plus long offset: 64-bit add.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8985 
// Pointer plus sign-extended int offset: the ConvI2L is folded into the
// add's sxtw extend, saving a separate extend instruction.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}
9000 
// Pointer plus scaled long index: folded into a single scaled-register
// address computed by lea.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9015 
// Pointer plus sign-extended, scaled int index: ConvI2L and the shift are
// both folded into a single sxtw-scaled address.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9030 
// Long left shift of a sign-extended int, done in one sbfiz (signed
// bitfield insert in zero): position = scale & 63, width capped at 32
// since only 32 source bits are meaningful after ConvI2L.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
  %}

  ins_pipe(ialu_reg_shift);
%}
9045 
9046 // Pointer Immediate Addition
9047 // n.b. this needs to be more expensive than using an indirect memory
9048 // operand
// Pointer plus add/sub-encodable immediate.
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9062 
9063 // Long Addition
// 64-bit long add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9079 
// Long Immediate Addition. No constant pool entries required.
// 64-bit long add of an add/sub-encodable immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
9094 
9095 // Integer Subtraction
// 32-bit integer subtract, register - register.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9110 
9111 // Immediate Subtraction
// 32-bit integer subtract of an add/sub-encodable immediate.
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
9125 
9126 // Long Subtraction
// 64-bit long subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9142 
// Long Immediate Subtraction. No constant pool entries required.
9144 instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
9145   match(Set dst (SubL src1 src2));
9146 
9147   ins_cost(INSN_COST);
9148   format %{ "sub$dst, $src1, $src2" %}
9149 
9150   // use opcode to indicate that this is a sub not an add
9151   opcode(0x1);
9152 
9153   ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
9154 
9155   ins_pipe(ialu_reg_imm);
9156 %}
9157 
9158 // Integer Negation (special case for sub)
9159 
// 32-bit negate, matched from (SubI 0 src); the zero immediate only
// anchors the match — negw encodes the subtraction from zr itself.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9173 
9174 // Long Negation
9175 
// 64-bit negate, matched from (SubL 0 src).
// NOTE(review): src is declared iRegIorL2I although this is a long
// pattern (the sibling negI_reg uses the same operand class) — confirm
// this operand type is intended rather than iRegL.
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
9189 
9190 // Integer Multiply
9191 
// 32-bit integer multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9206 
// Widening 32x32->64 signed multiply: both ConvI2L nodes are folded into
// a single smull instruction.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
9221 
9222 // Long Multiply
9223 
// 64-bit long multiply (low 64 bits of the product).
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
9238 
9239 instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
9240 %{
9241   match(Set dst (MulHiL src1 src2));
9242 
9243   ins_cost(INSN_COST * 7);
9244   format %{ "smulh   $dst, $src1, $src2, \t# mulhi" %}
9245 
9246   ins_encode %{
9247     __ smulh(as_Register($dst$$reg),
9248              as_Register($src1$$reg),
9249              as_Register($src2$$reg));
9250   %}
9251 
9252   ins_pipe(lmul_reg_reg);
9253 %}
9254 
9255 // Combined Integer Multiply & Add/Sub
9256 
9257 instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
9258   match(Set dst (AddI src3 (MulI src1 src2)));
9259 
9260   ins_cost(INSN_COST * 3);
9261   format %{ "madd  $dst, $src1, $src2, $src3" %}
9262 
9263   ins_encode %{
9264     __ maddw(as_Register($dst$$reg),
9265              as_Register($src1$$reg),
9266              as_Register($src2$$reg),
9267              as_Register($src3$$reg));
9268   %}
9269 
9270   ins_pipe(imac_reg_reg);
9271 %}
9272 
9273 instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
9274   match(Set dst (SubI src3 (MulI src1 src2)));
9275 
9276   ins_cost(INSN_COST * 3);
9277   format %{ "msub  $dst, $src1, $src2, $src3" %}
9278 
9279   ins_encode %{
9280     __ msubw(as_Register($dst$$reg),
9281              as_Register($src1$$reg),
9282              as_Register($src2$$reg),
9283              as_Register($src3$$reg));
9284   %}
9285 
9286   ins_pipe(imac_reg_reg);
9287 %}
9288 
9289 // Combined Long Multiply & Add/Sub
9290 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2.
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9306 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
9322 
9323 // Integer Divide
9324 
// 32-bit signed divide; the shared encoder handles the details.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
9334 
// (x >> 31) >>> 31 extracts the sign bit; a single lsrw #31 computes the
// same value directly.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, $div1" %}
  ins_encode %{
    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
9344 
// Rounding adjustment for signed division by a power of two:
// dst = src + (src >>> 31), done with a shifted-register addw.
instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{
  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
  ins_cost(INSN_COST);
  format %{ "addw $dst, $src, LSR $div1" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src$$reg),
              as_Register($src$$reg),
              Assembler::LSR, 31);
  %}
  ins_pipe(ialu_reg);
%}
9358 
9359 // Long Divide
9360 
// 64-bit signed divide; the shared encoder handles the details.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
9370 
// Long variant of signExtract: (x >> 63) >>> 63 == x >>> 63 (sign bit).
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, $div1" %}
  ins_encode %{
    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
9380 
9381 instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
9382   match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
9383   ins_cost(INSN_COST);
9384   format %{ "add $dst, $src, $div1" %}
9385 
9386   ins_encode %{
9387     __ add(as_Register($dst$$reg),
9388               as_Register($src$$reg),
9389               as_Register($src$$reg),
9390               Assembler::LSR, 63);
9391   %}
9392   ins_pipe(ialu_reg);
9393 %}
9394 
9395 // Integer Remainder
9396 
9397 instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
9398   match(Set dst (ModI src1 src2));
9399 
9400   ins_cost(INSN_COST * 22);
9401   format %{ "sdivw  rscratch1, $src1, $src2\n\t"
9402             "msubw($dst, rscratch1, $src2, $src1" %}
9403 
9404   ins_encode(aarch64_enc_modw(dst, src1, src2));
9405   ins_pipe(idiv_reg_reg);
9406 %}
9407 
9408 // Long Remainder
9409 
9410 instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
9411   match(Set dst (ModL src1 src2));
9412 
9413   ins_cost(INSN_COST * 38);
9414   format %{ "sdiv   rscratch1, $src1, $src2\n"
9415             "msub($dst, rscratch1, $src2, $src1" %}
9416 
9417   ins_encode(aarch64_enc_mod(dst, src1, src2));
9418   ins_pipe(ldiv_reg_reg);
9419 %}
9420 
9421 // Integer Shifts
9422 
9423 // Shift Left Register
// 32-bit variable left shift (lslvw).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9438 
9439 // Shift Left Immediate
// 32-bit left shift by immediate; the count is masked to 5 bits, as the
// format string advertises.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9454 
9455 // Shift Right Logical Register
// 32-bit variable logical right shift (lsrvw).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9470 
9471 // Shift Right Logical Immediate
// 32-bit logical right shift by immediate, count masked to 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9486 
9487 // Shift Right Arithmetic Register
// 32-bit variable arithmetic right shift (asrvw).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9502 
9503 // Shift Right Arithmetic Immediate
// 32-bit arithmetic right shift by immediate, count masked to 5 bits.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9518 
9519 // Combined Int Mask and Right Shift (using UBFM)
9520 // TODO
9521 
9522 // Long Shifts
9523 
9524 // Shift Left Register
// 64-bit variable left shift (lslv).
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9539 
9540 // Shift Left Immediate
// 64-bit left shift by immediate, count masked to 6 bits.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9555 
9556 // Shift Right Logical Register
// 64-bit variable logical right shift (lsrv).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9571 
9572 // Shift Right Logical Immediate
// 64-bit logical right shift by immediate, count masked to 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9587 
9588 // A special-case pattern for card table stores.
// A special-case pattern for card table stores: the CastP2X just
// reinterprets the pointer bits as a long, so a plain lsr suffices.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9603 
9604 // Shift Right Arithmetic Register
// 64-bit variable arithmetic right shift (asrv).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
9619 
9620 // Shift Right Arithmetic Immediate
// 64-bit arithmetic right shift by immediate, count masked to 6 bits.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
9635 
9636 // BEGIN This section of the file is automatically generated. Do not edit --------------
9637 
// Bitwise NOT via xor with -1: dst = ~src1, emitted as eon with zr.
// (Auto-generated section: comments here may be lost on regeneration.)
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
// 32-bit variant: dst = ~src1 via eonw with zr.
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
9670 
// And-not: dst = src1 & ~src2 (bicw), matched from (AndI src1 (XorI src2 -1)).
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit and-not: dst = src1 & ~src2 (bic).
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9704 
// Or-not: dst = src1 | ~src2 (ornw), matched from (OrI src1 (XorI src2 -1)).
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit or-not: dst = src1 | ~src2 (orn).
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9738 
// Xor-not: dst = ~(src1 ^ src2) (eonw), matched from -1 ^ (src2 ^ src1).
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// 64-bit xor-not: dst = ~(src1 ^ src2) (eon).
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
9772 
// And-not with folded logical right shift: dst = src1 & ~(src2 >>> src3).
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 & ~(src2 >>> src3).
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9808 
// And-not with folded arithmetic right shift: dst = src1 & ~(src2 >> src3).
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 & ~(src2 >> src3).
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9844 
// And-not with folded left shift: dst = src1 & ~(src2 << src3).
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = src1 & ~(src2 << src3).
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9880 
// Xor-not with folded logical right shift: dst = ~(src1 ^ (src2 >>> src3)).
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// 64-bit variant: dst = ~(src1 ^ (src2 >>> src3)).
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
9916 
9917 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
9918                          iRegIorL2I src1, iRegIorL2I src2,
9919                          immI src3, immI_M1 src4, rFlagsReg cr) %{
9920   match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
9921   ins_cost(1.9 * INSN_COST);
9922   format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
9923 
9924   ins_encode %{
9925     __ eonw(as_Register($dst$$reg),
9926               as_Register($src1$$reg),
9927               as_Register($src2$$reg),
9928               Assembler::ASR,
9929               $src3$$constant & 0x1f);
9930   %}
9931 
9932   ins_pipe(ialu_reg_reg_shift);
9933 %}
9934 
9935 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
9936                          iRegL src1, iRegL src2,
9937                          immI src3, immL_M1 src4, rFlagsReg cr) %{
9938   match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
9939   ins_cost(1.9 * INSN_COST);
9940   format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
9941 
9942   ins_encode %{
9943     __ eon(as_Register($dst$$reg),
9944               as_Register($src1$$reg),
9945               as_Register($src2$$reg),
9946               Assembler::ASR,
9947               $src3$$constant & 0x3f);
9948   %}
9949 
9950   ins_pipe(ialu_reg_reg_shift);
9951 %}
9952 
9953 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
9954                          iRegIorL2I src1, iRegIorL2I src2,
9955                          immI src3, immI_M1 src4, rFlagsReg cr) %{
9956   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
9957   ins_cost(1.9 * INSN_COST);
9958   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
9959 
9960   ins_encode %{
9961     __ eonw(as_Register($dst$$reg),
9962               as_Register($src1$$reg),
9963               as_Register($src2$$reg),
9964               Assembler::LSL,
9965               $src3$$constant & 0x1f);
9966   %}
9967 
9968   ins_pipe(ialu_reg_reg_shift);
9969 %}
9970 
9971 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
9972                          iRegL src1, iRegL src2,
9973                          immI src3, immL_M1 src4, rFlagsReg cr) %{
9974   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
9975   ins_cost(1.9 * INSN_COST);
9976   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
9977 
9978   ins_encode %{
9979     __ eon(as_Register($dst$$reg),
9980               as_Register($src1$$reg),
9981               as_Register($src2$$reg),
9982               Assembler::LSL,
9983               $src3$$constant & 0x3f);
9984   %}
9985 
9986   ins_pipe(ialu_reg_reg_shift);
9987 %}
9988 
// OR-NOT idioms: immI_M1/immL_M1 src4 makes the inner Xor a bitwise NOT,
// so these rules match  src1 | ~(src2 shift src3)  and fold it into a
// single ORN(W) with a shifted register operand.
// Six rules below: {I,L} x {LSR, ASR, LSL}.
// NOTE(review): 'cr' is declared but unused in all six — confirm leftover.
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit op: shift amount taken mod 32
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 | ~(src2 >>> src3); shift mod 64.
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 | ~(src2 >> src3) (arithmetic shift).
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 | ~(src2 >> src3) (arithmetic shift).
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 | ~(src2 << src3).
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 | ~(src2 << src3).
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4, rFlagsReg cr) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10096 
// AND with a constant-shifted register operand:
//   dst = src1 & (src2 shift src3)
// folded into AArch64's shifted-register form of AND(W) — one instruction
// instead of shift + and. Six rules: {I,L} x {LSR, ASR, LSL}.
// ('andr' is the macro-assembler's name for 64-bit AND; plain 'and' clashes
// with the C++ alternative operator token.)
// NOTE(review): 'cr' is declared but unused in all six — confirm leftover.
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit op: shift amount taken mod 32
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 & (src2 >>> src3); shift mod 64.
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 & (src2 >> src3) (arithmetic shift).
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 & (src2 >> src3) (arithmetic shift).
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 & (src2 << src3).
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 & (src2 << src3).
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10210 
// XOR with a constant-shifted register operand:
//   dst = src1 ^ (src2 shift src3)
// folded into the shifted-register form of EOR(W).
// Six rules: {I,L} x {LSR, ASR, LSL}.
// NOTE(review): 'cr' is declared but unused in all six — confirm leftover.
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit op: shift amount taken mod 32
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 ^ (src2 >>> src3); shift mod 64.
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 ^ (src2 >> src3) (arithmetic shift).
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 ^ (src2 >> src3) (arithmetic shift).
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 ^ (src2 << src3).
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 ^ (src2 << src3).
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10324 
// OR with a constant-shifted register operand:
//   dst = src1 | (src2 shift src3)
// folded into the shifted-register form of ORR(W).
// Six rules: {I,L} x {LSR, ASR, LSL}.
// NOTE(review): 'cr' is declared but unused in all six — confirm leftover.
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit op: shift amount taken mod 32
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 | (src2 >>> src3); shift mod 64.
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 | (src2 >> src3) (arithmetic shift).
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 | (src2 >> src3) (arithmetic shift).
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 | (src2 << src3).
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 | (src2 << src3).
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10438 
// ADD with a constant-shifted register operand:
//   dst = src1 + (src2 shift src3)
// folded into the shifted-register form of ADD(W).
// Six rules: {I,L} x {LSR, ASR, LSL}.
// NOTE(review): 'cr' is declared but unused in all six — confirm leftover.
instruct AddI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit op: shift amount taken mod 32
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 + (src2 >>> src3); shift mod 64.
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 + (src2 >> src3) (arithmetic shift).
instruct AddI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 + (src2 >> src3) (arithmetic shift).
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 + (src2 << src3) — common for scaled index arithmetic.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 + (src2 << src3).
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10552 
// SUB with a constant-shifted register operand:
//   dst = src1 - (src2 shift src3)
// folded into the shifted-register form of SUB(W).
// Six rules: {I,L} x {LSR, ASR, LSL}.
// NOTE(review): 'cr' is declared but unused in all six — confirm leftover.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              // 32-bit op: shift amount taken mod 32
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 - (src2 >>> src3); shift mod 64.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 - (src2 >> src3) (arithmetic shift).
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 - (src2 >> src3) (arithmetic shift).
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Int: src1 - (src2 << src3).
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Long: src1 - (src2 << src3).
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, rFlagsReg cr) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
10666 
10667 
10668 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// (x << L) >> R  ==  SBFM x, immr=(R-L) mod 64, imms=63-L:
// SBFM copies bits <imms:0> of the source, rotated right by immr, and
// sign-extends from bit position imms.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // imms: highest surviving source bit after the left shift.
    int s = 63 - lshift;
    // immr: net right-rotation; & 63 handles rshift < lshift.
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
10691 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: (x << L) >> R  ==  SBFMW with
// immr=(R-L) mod 32, imms=31-L (sign-extends from bit 31-L).
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what sbfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // imms: highest surviving source bit after the left shift.
    int s = 31 - lshift;
    // immr: net right-rotation; & 31 handles rshift < lshift.
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
10714 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// Unsigned counterpart of sbfmL: (x << L) >>> R  ==  UBFM with
// immr=(R-L) mod 64, imms=63-L (zero-extends instead of sign-extending).
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfm can do.
  predicate((unsigned int)n->in(2)->get_int() <= 63
            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);

  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // imms: highest surviving source bit after the left shift.
    int s = 63 - lshift;
    // immr: net right-rotation; & 63 handles rshift < lshift.
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
10737 
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit unsigned variant: (x << L) >>> R  ==  UBFMW with
// immr=(R-L) mod 32, imms=31-L (zero-extends).
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  // Make sure we are not going to exceed what ubfmw can do.
  predicate((unsigned int)n->in(2)->get_int() <= 31
            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);

  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
    // imms: highest surviving source bit after the left shift.
    int s = 31 - lshift;
    // immr: net right-rotation; & 31 handles rshift < lshift.
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
// Bitfield extract with shift & mask

// (src >>> rshift) & mask, where mask is 2^k - 1 (immI_bitmask), becomes a
// single UBFXW extracting a k-bit field starting at bit rshift.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  // i.e. field width + start bit must fit within the 32-bit word.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // mask is 2^width - 1, so width = log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
// 64-bit bitfield extract: (src >>> rshift) & mask with mask = 2^k - 1
// (immL_bitmask) becomes a single UBFX.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  // i.e. field width + start bit must fit within the 64-bit word.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    long mask = $mask$$constant;
    // mask is 2^width - 1, so width = log2(mask + 1).
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
10796 
// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
// The 64-bit ubfx zero-extends the extracted field, which implements the
// ConvI2L of the non-negative masked value for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  // i.e. field width + start bit must fit within the 32-bit word.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    long mask = $mask$$constant;
    // mask is 2^width - 1, so width = log2(mask + 1).
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
10816 
// Rotations
//
// (srcX << lshift) | (srcY >>> rshift) — or the Add equivalent, which
// is identical when the shifted fields don't overlap — maps onto a
// single EXTR instruction.  The predicates require lshift + rshift to
// equal the register width, which guarantees non-overlap.
// NOTE(review): the cr operand appears unused (no effect/KILL listed)
// — presumably harmless; confirm against adlc conventions.

instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  // lshift + rshift must be exactly 64 (mod 64).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of extrOrL.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  // lshift + rshift must be exactly 32 (mod 32).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// Same as extrOrL but matched through AddL: with disjoint fields,
// Add and Or produce identical results.
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}

// 32-bit variant of extrAddL.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
10878 
10879 
// rol expander
//
// AArch64 has no rol instruction; rotate-left by a variable amount is
// implemented as rotate-right by the negated amount: rorv(x, -shift).

instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    // rscratch1 = -shift; rorv only uses the low bits, so the
    // negation gives the equivalent right-rotate amount.
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// rol expander

// 32-bit variant of rolL_rReg.
instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "rol    $dst, $src, $shift" %}
  ins_cost(INSN_COST * 3);
  ins_encode %{
    __ subw(rscratch1, zr, as_Register($shift$$reg));
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            rscratch1);
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// The following four rules recognize the two idiomatic Java spellings
// of a variable rotate-left — (x << s) | (x >>> (64 - s)) and
// (x << s) | (x >>> (0 - s)) — and expand to the rol expander above.

instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));

  expand %{
    rolL_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}

instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));

  expand %{
    rolI_rReg(dst, src, shift, cr);
  %}
%}
10947 
// ror expander
//
// Rotate-right has direct hardware support (rorv), so no negation is
// needed — hence the lower cost than the rol expanders above.

instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// ror expander

// 32-bit variant of rorL_rReg.
instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
%{
  effect(DEF dst, USE src, USE shift);

  format %{ "ror    $dst, $src, $shift" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
            as_Register($shift$$reg));
    %}
  ins_pipe(ialu_reg_reg_vshift);
%}

// The following four rules recognize the idiomatic rotate-right
// spellings — (x >>> s) | (x << (64 - s)) and (x >>> s) | (x << (0 - s))
// — and expand to the ror expander above.

instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));

  expand %{
    rorL_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}

instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));

  expand %{
    rorI_rReg(dst, src, shift, cr);
  %}
%}
11013 
// Add/subtract (extended)
//
// Fold an explicit ConvI2L into the add/sub by using the register
// form with an sxtw (sign-extend word) operand extension.

instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};

// Subtract counterpart of AddExtI.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%};
11041 
11042 
// The (x << k) >> k idiom (k = 32 - field width) is how the IR spells
// a sign-extension (arithmetic >>) or zero-extension (logical >>>) of
// a narrow field.  Each rule below folds that pair of shifts into the
// corresponding extended-operand form of add.

// add with src2 sign-extended from 16 bits: (src2 << 16) >> 16.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add with src2 sign-extended from 8 bits: (src2 << 24) >> 24.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// add with src2 zero-extended from 8 bits: (src2 << 24) >>> 24.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 sign-extended from 16 bits.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxth $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 sign-extended from 32 bits.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtw $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 sign-extended from 8 bits.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, sxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 zero-extended from 8 bits.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, uxtb $src2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
11133 
11134 
// Zero-extension can also be spelled as an AND with an all-ones mask
// (0xff, 0xffff, 0xffffffff).  These rules fold (srcX & mask) into the
// uxtb/uxth/uxtw extended-operand form of add/sub.

// addw with src2 zero-extended from 8 bits via (src2 & 0xff).
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// addw with src2 zero-extended from 16 bits via (src2 & 0xffff).
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 zero-extended from 8 bits.
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 zero-extended from 16 bits.
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit add with src2 zero-extended from 32 bits.
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}

// Subtract counterparts of the five rules above.

instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}

instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
11264 
11265 // END This section of the file is automatically generated. Do not edit --------------
11266 
11267 // ============================================================================
11268 // Floating Point Arithmetic Instructions
11269 
// Float add: single fadds instruction.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double add: single faddd instruction.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Float subtract.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double subtract.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

// Float multiply (slightly higher cost than add/sub).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

// Double multiply.
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
11359 
// We cannot use these fused mul-with-add/sub ops because they don't
// produce the same result as the equivalent separated ops
// (essentially they don't round the intermediate result). That's a
// shame. Leaving them here in case we can identify cases where it is
// legitimate to use them.
11365 
11366 
11367 // instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11368 //   match(Set dst (AddF (MulF src1 src2) src3));
11369 
11370 //   format %{ "fmadds   $dst, $src1, $src2, $src3" %}
11371 
11372 //   ins_encode %{
11373 //     __ fmadds(as_FloatRegister($dst$$reg),
11374 //              as_FloatRegister($src1$$reg),
11375 //              as_FloatRegister($src2$$reg),
11376 //              as_FloatRegister($src3$$reg));
11377 //   %}
11378 
11379 //   ins_pipe(pipe_class_default);
11380 // %}
11381 
11382 // instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11383 //   match(Set dst (AddD (MulD src1 src2) src3));
11384 
11385 //   format %{ "fmaddd   $dst, $src1, $src2, $src3" %}
11386 
11387 //   ins_encode %{
11388 //     __ fmaddd(as_FloatRegister($dst$$reg),
11389 //              as_FloatRegister($src1$$reg),
11390 //              as_FloatRegister($src2$$reg),
11391 //              as_FloatRegister($src3$$reg));
11392 //   %}
11393 
11394 //   ins_pipe(pipe_class_default);
11395 // %}
11396 
11397 // instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11398 //   match(Set dst (AddF (MulF (NegF src1) src2) src3));
11399 //   match(Set dst (AddF (NegF (MulF src1 src2)) src3));
11400 
11401 //   format %{ "fmsubs   $dst, $src1, $src2, $src3" %}
11402 
11403 //   ins_encode %{
11404 //     __ fmsubs(as_FloatRegister($dst$$reg),
11405 //               as_FloatRegister($src1$$reg),
11406 //               as_FloatRegister($src2$$reg),
11407 //              as_FloatRegister($src3$$reg));
11408 //   %}
11409 
11410 //   ins_pipe(pipe_class_default);
11411 // %}
11412 
11413 // instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11414 //   match(Set dst (AddD (MulD (NegD src1) src2) src3));
11415 //   match(Set dst (AddD (NegD (MulD src1 src2)) src3));
11416 
11417 //   format %{ "fmsubd   $dst, $src1, $src2, $src3" %}
11418 
11419 //   ins_encode %{
11420 //     __ fmsubd(as_FloatRegister($dst$$reg),
11421 //               as_FloatRegister($src1$$reg),
11422 //               as_FloatRegister($src2$$reg),
11423 //               as_FloatRegister($src3$$reg));
11424 //   %}
11425 
11426 //   ins_pipe(pipe_class_default);
11427 // %}
11428 
11429 // instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
11430 //   match(Set dst (SubF (MulF (NegF src1) src2) src3));
11431 //   match(Set dst (SubF (NegF (MulF src1 src2)) src3));
11432 
11433 //   format %{ "fnmadds  $dst, $src1, $src2, $src3" %}
11434 
11435 //   ins_encode %{
11436 //     __ fnmadds(as_FloatRegister($dst$$reg),
11437 //                as_FloatRegister($src1$$reg),
11438 //                as_FloatRegister($src2$$reg),
11439 //                as_FloatRegister($src3$$reg));
11440 //   %}
11441 
11442 //   ins_pipe(pipe_class_default);
11443 // %}
11444 
11445 // instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
11446 //   match(Set dst (SubD (MulD (NegD src1) src2) src3));
11447 //   match(Set dst (SubD (NegD (MulD src1 src2)) src3));
11448 
11449 //   format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}
11450 
11451 //   ins_encode %{
11452 //     __ fnmaddd(as_FloatRegister($dst$$reg),
11453 //                as_FloatRegister($src1$$reg),
11454 //                as_FloatRegister($src2$$reg),
11455 //                as_FloatRegister($src3$$reg));
11456 //   %}
11457 
11458 //   ins_pipe(pipe_class_default);
11459 // %}
11460 
11461 // instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
11462 //   match(Set dst (SubF (MulF src1 src2) src3));
11463 
11464 //   format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}
11465 
11466 //   ins_encode %{
11467 //     __ fnmsubs(as_FloatRegister($dst$$reg),
11468 //                as_FloatRegister($src1$$reg),
11469 //                as_FloatRegister($src2$$reg),
11470 //                as_FloatRegister($src3$$reg));
11471 //   %}
11472 
11473 //   ins_pipe(pipe_class_default);
11474 // %}
11475 
11476 // instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
11477 //   match(Set dst (SubD (MulD src1 src2) src3));
11478 
11479 //   format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}
11480 
11481 //   ins_encode %{
11482 //   // n.b. insn name should be fnmsubd
11483 //     __ fnmsub(as_FloatRegister($dst$$reg),
11484 //                as_FloatRegister($src1$$reg),
11485 //                as_FloatRegister($src2$$reg),
11486 //                as_FloatRegister($src3$$reg));
11487 //   %}
11488 
11489 //   ins_pipe(pipe_class_default);
11490 // %}
11491 
11492 
// Float divide — much higher cost to reflect the long-latency,
// non-pipelined divide unit.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double divide — even longer latency than the single form.
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
11522 
// Float negate.
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // The emitted instruction is fnegs (single precision), so print
  // "fnegs" — consistent with the "fnegd" format in negD_reg_reg.
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
11536 
// Double negate.
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
11550 
// Float absolute value.
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double absolute value.
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
11576 
// Double square root.
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Double-precision op: schedule on the double divide/sqrt pipe.
  // (Was fp_div_s — the single-precision pipe — apparently swapped
  // with sqrtF_reg; affects scheduling only, not correctness.)
  ins_pipe(fp_div_d);
%}
11589 
// Float square root, matched via the double-rounding-safe idiom
// (float)sqrt((double)src — for this pattern fsqrts gives the same
// result as the round-trip through double.
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  // Single-precision op: schedule on the single divide/sqrt pipe.
  // (Was fp_div_d — apparently swapped with sqrtD_reg; affects
  // scheduling only, not correctness.)
  ins_pipe(fp_div_s);
%}
11602 
11603 // ============================================================================
11604 // Logical Instructions
11605 
11606 // Integer Logical Instructions
11607 
11608 // And Instructions
11609 
11610 
11611 instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
11612   match(Set dst (AndI src1 src2));
11613 
11614   format %{ "andw  $dst, $src1, $src2\t# int" %}
11615 
11616   ins_cost(INSN_COST);
11617   ins_encode %{
11618     __ andw(as_Register($dst$$reg),
11619             as_Register($src1$$reg),
11620             as_Register($src2$$reg));
11621   %}
11622 
11623   ins_pipe(ialu_reg_reg);
11624 %}
11625 
// Register-immediate int AND; immILog restricts the constant to values
// encodable as an AArch64 logical immediate.
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // The encoding emits a plain (non-flag-setting) andw, so print
  // "andw" rather than "andsw" to match the generated instruction.
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11640 
// Or Instructions

// Register-register int OR.
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Register-immediate int OR (immILog: logical-immediate encodable).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Xor Instructions

// Register-register int XOR.
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Register-immediate int XOR (immILog: logical-immediate encodable).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11704 
// Long Logical Instructions
// TODO
// NOTE(review): the "# int" tags in the format strings below are
// debug-output only; "# long" would be more accurate.

// Register-register long AND.
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Register-immediate long AND (immLLog: logical-immediate encodable).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  format %{ "and  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11737 
// Or Instructions

// Register-register long OR.
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Register-immediate long OR (immLLog: logical-immediate encodable).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11769 
// Xor Instructions

// Register-register long XOR.
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "eor  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Register-immediate long XOR (immLLog: logical-immediate encodable).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  format %{ "eor  $dst, $src1, $src2\t# int" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (unsigned long)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
11801 
// i2l: sign-extend int to long. sbfm with immr=0, imms=31 is the sxtw alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}

// this pattern occurs in bigmath arithmetic
// ui2l: when the i2l result is immediately masked with 0xffffffff we can
// zero-extend instead (ubfm with immr=0, imms=31 is the uxtw alias).
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}

// l2i: a 32-bit register move keeps the low word; the upper 32 bits are
// simply dropped.
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
11840 
// Conv2B for int: dst = (src != 0) ? 1 : 0, via compare-with-zero + cset.
instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmpw $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}

// Conv2B for pointer: same idea, but a 64-bit compare against zr.
instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{
    "cmp  $src, zr\n\t"
    "cset $dst, ne"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cset(as_Register($dst$$reg), Assembler::NE);
  %}

  ins_pipe(ialu_reg);
%}
11876 
// d2f: narrow double to float (fcvt D->S form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// f2d: widen float to double (fcvt S->D form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// f2i: float to int, round toward zero (fcvtzs, 32-bit destination).
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}

// f2l: float to long, round toward zero (fcvtzs, 64-bit destination).
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}

// i2f: signed 32-bit int to float (scvtf).
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// l2f: signed 64-bit long to float (scvtf).
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
11954 
// d2i: double to int, round toward zero (fcvtzs, 32-bit destination).
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}

// d2l: double to long, round toward zero (fcvtzs, 64-bit destination).
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}

// i2d: signed 32-bit int to double (scvtf).
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// l2d: signed 64-bit long to double (scvtf).
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
12006 
// stack <-> reg and reg <-> reg shuffles with no conversion

// All of these reinterpret the bits of a value; no format conversion is
// performed. The stack variants load from / store to a spill slot addressed
// as sp + displacement.

// Load the raw bits of a spilled float into a general register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the raw bits of a spilled int into a float register.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Load the raw bits of a spilled double into a general register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load the raw bits of a spilled long into a double register.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12080 
// Store the raw bits of a float register to a stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}

// Store the raw bits of an int register to a stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12116 
// Store the raw bits of a double register to a stack slot.
// n.b. the format previously printed "$dst, $src"; the encoding stores
// $src into the stack slot $dst, so print the operands in store order,
// consistent with the other reg->stack shuffles.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
12134 
// Store the raw bits of a long register to a stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
12152 
// Bit-for-bit moves between FP and general registers via fmov.

// float bits -> int register
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int bits -> float register
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double bits -> long register
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long bits -> double register
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
12224 
12225 // ============================================================================
12226 // clearing of an array
12227 
12228 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
12229 %{
12230   match(Set dummy (ClearArray cnt base));
12231   effect(USE_KILL cnt, USE_KILL base, KILL cr);
12232 
12233   ins_cost(4 * INSN_COST);
12234   format %{ "ClearArray $cnt, $base" %}
12235 
12236   ins_encode %{
12237     __ zero_words($base$$Register, $cnt$$Register);
12238   %}
12239 
12240   ins_pipe(pipe_class_memory);
12241 %}
12242 
12243 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 tmp, Universe dummy, rFlagsReg cr)
12244 %{
12245   match(Set dummy (ClearArray cnt base));
12246   effect(USE_KILL base, TEMP tmp, KILL cr);
12247 
12248   ins_cost(4 * INSN_COST);
12249   format %{ "ClearArray $cnt, $base" %}
12250 
12251   ins_encode %{
12252     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
12253   %}
12254 
12255   ins_pipe(pipe_class_memory);
12256 %}
12257 
12258 // ============================================================================
12259 // Overflow Math Instructions
12260 
12261 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
12262 %{
12263   match(Set cr (OverflowAddI op1 op2));
12264 
12265   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12266   ins_cost(INSN_COST);
12267   ins_encode %{
12268     __ cmnw($op1$$Register, $op2$$Register);
12269   %}
12270 
12271   ins_pipe(icmp_reg_reg);
12272 %}
12273 
12274 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
12275 %{
12276   match(Set cr (OverflowAddI op1 op2));
12277 
12278   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
12279   ins_cost(INSN_COST);
12280   ins_encode %{
12281     __ cmnw($op1$$Register, $op2$$constant);
12282   %}
12283 
12284   ins_pipe(icmp_reg_imm);
12285 %}
12286 
12287 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
12288 %{
12289   match(Set cr (OverflowAddL op1 op2));
12290 
12291   format %{ "cmn   $op1, $op2\t# overflow check long" %}
12292   ins_cost(INSN_COST);
12293   ins_encode %{
12294     __ cmn($op1$$Register, $op2$$Register);
12295   %}
12296 
12297   ins_pipe(icmp_reg_reg);
12298 %}
12299 
12300 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
12301 %{
12302   match(Set cr (OverflowAddL op1 op2));
12303 
12304   format %{ "cmn   $op1, $op2\t# overflow check long" %}
12305   ins_cost(INSN_COST);
12306   ins_encode %{
12307     __ cmn($op1$$Register, $op2$$constant);
12308   %}
12309 
12310   ins_pipe(icmp_reg_imm);
12311 %}
12312 
12313 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
12314 %{
12315   match(Set cr (OverflowSubI op1 op2));
12316 
12317   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
12318   ins_cost(INSN_COST);
12319   ins_encode %{
12320     __ cmpw($op1$$Register, $op2$$Register);
12321   %}
12322 
12323   ins_pipe(icmp_reg_reg);
12324 %}
12325 
12326 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
12327 %{
12328   match(Set cr (OverflowSubI op1 op2));
12329 
12330   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
12331   ins_cost(INSN_COST);
12332   ins_encode %{
12333     __ cmpw($op1$$Register, $op2$$constant);
12334   %}
12335 
12336   ins_pipe(icmp_reg_imm);
12337 %}
12338 
// long subtract overflow: flags from $op1 - $op2 (64-bit).
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// long subtract overflow, immediate subtrahend.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// int negate overflow: 0 - $op1 overflows only for INT_MIN.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// long negate overflow: 0 - $op1 overflows only for LONG_MIN.
// NOTE(review): the zero operand is declared immI0 even though the matched
// node is OverflowSubL — looks intentional given the matcher's handling of
// the constant, but worth confirming against other ports.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
12390 
// int multiply overflow check. smull produces the full 64-bit product;
// the product overflows 32 bits iff it differs from its own 32->64 sign
// extension. The NE result is then converted into the V flag (which the
// generic overflow cmove/branch tests) by computing 0x80000000 - 1,
// which sets VS, only in the NE case.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// int multiply overflow feeding a branch directly: skip the V-flag
// materialization above and branch on NE/EQ instead. Only valid when the
// Bool tests overflow/no_overflow (see predicate).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}

// long multiply overflow check. mul gives the low 64 bits, smulh the high
// 64; no overflow iff the high word equals the sign extension of the low
// word. The NE result is converted into the V flag as in the int case.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// long multiply overflow feeding a branch directly; see the int variant.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #31\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 31);    // Top is pure sign ext
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
12480 
12481 // ============================================================================
12482 // Compare Instructions
12483 
12484 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
12485 %{
12486   match(Set cr (CmpI op1 op2));
12487 
12488   effect(DEF cr, USE op1, USE op2);
12489 
12490   ins_cost(INSN_COST);
12491   format %{ "cmpw  $op1, $op2" %}
12492 
12493   ins_encode(aarch64_enc_cmpw(op1, op2));
12494 
12495   ins_pipe(icmp_reg_reg);
12496 %}
12497 
12498 instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
12499 %{
12500   match(Set cr (CmpI op1 zero));
12501 
12502   effect(DEF cr, USE op1);
12503 
12504   ins_cost(INSN_COST);
12505   format %{ "cmpw $op1, 0" %}
12506 
12507   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
12508 
12509   ins_pipe(icmp_reg_imm);
12510 %}
12511 
12512 instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
12513 %{
12514   match(Set cr (CmpI op1 op2));
12515 
12516   effect(DEF cr, USE op1);
12517 
12518   ins_cost(INSN_COST);
12519   format %{ "cmpw  $op1, $op2" %}
12520 
12521   ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
12522 
12523   ins_pipe(icmp_reg_imm);
12524 %}
12525 
12526 instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
12527 %{
12528   match(Set cr (CmpI op1 op2));
12529 
12530   effect(DEF cr, USE op1);
12531 
12532   ins_cost(INSN_COST * 2);
12533   format %{ "cmpw  $op1, $op2" %}
12534 
12535   ins_encode(aarch64_enc_cmpw_imm(op1, op2));
12536 
12537   ins_pipe(icmp_reg_imm);
12538 %}
12539 
// Unsigned compare Instructions; really, same as signed compare
// except it should only be used to feed an If or a CMovI which takes a
// cmpOpU.

instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate fits the 12-bit add/sub encoding: single instruction.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate: may need to materialize it first, hence 2x cost.
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12599 
// Signed long compares: set flags from $op1 - $op2 (64-bit).

instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate fits the 12-bit add/sub encoding: single instruction.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate: may need to materialize it first, hence 2x cost.
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12655 
// Unsigned long compares: same encodings as signed, but the result must be
// consumed by a cmpOpU-style user (note the rFlagsRegU operand).

instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compare against zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Immediate fits the 12-bit add/sub encoding: single instruction.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Arbitrary immediate: may need to materialize it first, hence 2x cost.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
12711 
// Pointer and compressed-pointer compares (unsigned flag semantics).

instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Null checks: compare a (compressed) pointer against zero.

instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
12767 
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.

// Single-precision compare of two registers.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Single-precision compare against the +0.0 immediate form of fcmp.
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
// Double-precision compares, mirroring the CmpF patterns above.
12801 
// Double-precision compare of two registers.
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double-precision compare against the +0.0 immediate form of fcmp.
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
12829 
12830 instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
12831 %{
12832   match(Set dst (CmpF3 src1 src2));
12833   effect(KILL cr);
12834 
12835   ins_cost(5 * INSN_COST);
12836   format %{ "fcmps $src1, $src2\n\t"
12837             "csinvw($dst, zr, zr, eq\n\t"
12838             "csnegw($dst, $dst, $dst, lt)"
12839   %}
12840 
12841   ins_encode %{
12842     Label done;
12843     FloatRegister s1 = as_FloatRegister($src1$$reg);
12844     FloatRegister s2 = as_FloatRegister($src2$$reg);
12845     Register d = as_Register($dst$$reg);
12846     __ fcmps(s1, s2);
12847     // installs 0 if EQ else -1
12848     __ csinvw(d, zr, zr, Assembler::EQ);
12849     // keeps -1 if less or unordered else installs 1
12850     __ csnegw(d, d, d, Assembler::LT);
12851     __ bind(done);
12852   %}
12853 
12854   ins_pipe(pipe_class_default);
12855 
12856 %}
12857 
12858 instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
12859 %{
12860   match(Set dst (CmpD3 src1 src2));
12861   effect(KILL cr);
12862 
12863   ins_cost(5 * INSN_COST);
12864   format %{ "fcmpd $src1, $src2\n\t"
12865             "csinvw($dst, zr, zr, eq\n\t"
12866             "csnegw($dst, $dst, $dst, lt)"
12867   %}
12868 
12869   ins_encode %{
12870     Label done;
12871     FloatRegister s1 = as_FloatRegister($src1$$reg);
12872     FloatRegister s2 = as_FloatRegister($src2$$reg);
12873     Register d = as_Register($dst$$reg);
12874     __ fcmpd(s1, s2);
12875     // installs 0 if EQ else -1
12876     __ csinvw(d, zr, zr, Assembler::EQ);
12877     // keeps -1 if less or unordered else installs 1
12878     __ csnegw(d, d, d, Assembler::LT);
12879     __ bind(done);
12880   %}
12881   ins_pipe(pipe_class_default);
12882 
12883 %}
12884 
12885 instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
12886 %{
12887   match(Set dst (CmpF3 src1 zero));
12888   effect(KILL cr);
12889 
12890   ins_cost(5 * INSN_COST);
12891   format %{ "fcmps $src1, 0.0\n\t"
12892             "csinvw($dst, zr, zr, eq\n\t"
12893             "csnegw($dst, $dst, $dst, lt)"
12894   %}
12895 
12896   ins_encode %{
12897     Label done;
12898     FloatRegister s1 = as_FloatRegister($src1$$reg);
12899     Register d = as_Register($dst$$reg);
12900     __ fcmps(s1, 0.0);
12901     // installs 0 if EQ else -1
12902     __ csinvw(d, zr, zr, Assembler::EQ);
12903     // keeps -1 if less or unordered else installs 1
12904     __ csnegw(d, d, d, Assembler::LT);
12905     __ bind(done);
12906   %}
12907 
12908   ins_pipe(pipe_class_default);
12909 
12910 %}
12911 
// Manifest a CmpD3 result against the constant 0.0d:
// -1 if src1 < 0.0 or unordered, 0 if equal, +1 if greater.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq)\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    // Compare against literal zero (fcmpd immediate form).
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
  %}
  ins_pipe(pipe_class_default);

%}
12937 
12938 // Manifest a CmpL result in an integer register.
12939 // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 6);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, $dst, lt"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    // dst = (src1 != src2) ? 1 : 0 ...
    __ csetw($dst$$Register, Assembler::NE);
    // ... then negate it when src1 < src2, yielding -1/0/+1.
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
12960 
// Manifest a CmpLTMask result: dst = (p < q, signed) ? -1 (all ones) : 0.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    // dst = 1 if p < q (signed), else 0 ...
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    // ... then negate: 1 -> -1 (mask of all ones), 0 -> 0.
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
12981 
// CmpLTMask against zero: the sign bit already decides the result, so a
// single arithmetic shift right by 31 replicates it across the register
// (negative -> -1, non-negative -> 0) without touching the flags twice.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
12997 
12998 // ============================================================================
12999 // Max and Min
13000 
// Signed integer minimum: dst = (src1 < src2) ? src1 : src2.
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MinI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, lt"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // Select src1 when src1 < src2 (signed), otherwise src2.
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::LT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13025 // FROM HERE
13026 
// Signed integer maximum: dst = (src1 > src2) ? src1 : src2.
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  match(Set dst (MaxI src1 src2));

  effect(DEF dst, USE src1, USE src2, KILL cr);
  size(8);

  ins_cost(INSN_COST * 3);
  format %{
    "cmpw $src1, $src2\t# signed int\n\t"
    "cselw $dst, $src1, $src2, gt"
  %}

  ins_encode %{
    __ cmpw(as_Register($src1$$reg),
            as_Register($src2$$reg));
    // Select src1 when src1 > src2 (signed), otherwise src2.
    __ cselw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             Assembler::GT);
  %}

  ins_pipe(ialu_reg_reg);
%}
13051 
13052 // ============================================================================
13053 // Branch Instructions
13054 
13055 // Direct Branch.
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // Unconditional PC-relative branch to $lbl.
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}
13069 
13070 // Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // Conditional branch on the condition in $cmp; flags were set by a
  // preceding compare that produced $cr.
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13090 
13091 // Conditional Near Branch Unsigned
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // Unsigned variant: $cmp is a cmpOpU and $cr an unsigned flags register.
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
13111 
13112 // Make use of CBZ and CBNZ.  These instructions, as well as being
13113 // shorter than (cmp; branch), have the additional benefit of not
13114 // killing the flags.
13115 
// Compare-int-with-zero and branch, fused into a single CBZW/CBNZW.
// The predicate restricts this to eq/ne tests, the only ones CBZ encodes.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13134 
// Compare-long-with-zero and branch, fused into a single CBZ/CBNZ
// (64-bit register form). Limited to eq/ne by the predicate.
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13153 
// Pointer null-check and branch, fused into a single CBZ/CBNZ
// (64-bit register form). Limited to eq/ne by the predicate.
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13172 
// Narrow-oop null-check and branch, fused into CBZW/CBNZW (32-bit form,
// since compressed oops occupy a w-register). Limited to eq/ne.
instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13191 
// Null-check of a DecodeN'd oop: a narrow oop is zero iff its decoded
// form is null, so test the undecoded w-register directly with
// CBZW/CBNZW and skip the decode.
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13210 
// Unsigned compare-int-with-zero and branch. Besides eq/ne, unsigned
// gt 0 means "non-zero" and unsigned le 0 means "zero", so all four
// tests collapse onto CBZW/CBNZW.
instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            ||  n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ and LS (unsigned le) both mean "zero" here.
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13231 
// Unsigned compare-long-with-zero and branch; 64-bit analogue of
// cmpUI_imm0_branch (eq/le -> CBZ, ne/gt -> CBNZ).
instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpUL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // EQ and LS (unsigned le) both mean "zero" here.
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13252 
13253 // Test bit and Branch
13254 
13255 // Patterns for short (< 32KiB) variants
// Sign test of a long via TBZ/TBNZ on bit 63: lt 0 means "sign bit set"
// (tbnz), ge 0 means "sign bit clear" (tbz). Short (< 32KiB) variant.
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> branch if bit 63 is set (NE to tbr); GE -> branch if clear.
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
13273 
// Sign test of an int via TBZ/TBNZ on bit 31. Short (< 32KiB) variant.
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    // LT -> branch if bit 31 is set (NE to tbr); GE -> branch if clear.
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
13291 
// Single-bit test of a long: (op1 & (1 << bit)) ==/!= 0 becomes one
// TBZ/TBNZ. The predicate requires the AND mask to be a power of two.
// Short (< 32KiB) variant.
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Mask is a power of two, so its log2 is the bit number to test.
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
13310 
// Single-bit test of an int via TBZ/TBNZ; int analogue of
// cmpL_branch_bit. Short (< 32KiB) variant.
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // Mask is a power of two, so its log2 is the bit number to test.
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
13329 
13330 // And far variants
// Far variant of cmpL_branch_sign: same sign test on bit 63 but emitted
// with far=true so out-of-range targets are reached via an inverted
// test around an unconditional branch.
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13347 
// Far variant of cmpI_branch_sign (sign test on bit 31, far=true).
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
            || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13364 
// Far variant of cmpL_branch_bit (single-bit test, far=true).
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13382 
// Far variant of cmpI_branch_bit (single-bit test, far=true).
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
            && is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
13400 
13401 // Test bits
13402 
// Set flags from (op1 & imm) without materializing the AND result,
// using TST. Requires the immediate to be encodable as a 64-bit
// logical immediate (checked by the predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
13415 
// Set flags from (op1 & imm) without materializing the AND result,
// using the 32-bit TSTW. Requires the immediate to be encodable as a
// 32-bit logical immediate (checked by the predicate).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
13428 
// Register-register form of cmpL_and: flags from (op1 & op2) via TST.
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
13439 
// Register-register form of cmpI_and: flags from (op1 & op2) via TSTW.
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
13450 
13451 
13452 // Conditional Far Branch
13453 // Conditional Far Branch Unsigned
13454 // TODO: fixme
13455 
13456 // counted loop end branch near
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // Uses the same conditional-branch encoding as branchCon.
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13472 
13473 // counted loop end branch near Unsigned
instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // Unsigned counterpart of branchLoopEnd (cmpOpU / rFlagsRegU).
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch);
%}
13489 
13490 // counted loop end branch far
13491 // counted loop end branch far unsigned
13492 // TODO: fixme
13493 
13494 // ============================================================================
13495 // inlined locking and unlocking
13496 
// Inlined fast-path monitor enter. Produces the lock outcome in the
// flags register; $tmp and $tmp2 are scratch and are clobbered.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2);

  // TODO
  // identify correct cost
  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}

  ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13511 
// Inlined fast-path monitor exit; counterpart of cmpFastLock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));

  ins_pipe(pipe_serial);
%}
13524 
13525 
13526 // ============================================================================
13527 // Safepoint Instructions
13528 
13529 // TODO
13530 // provide a near and far version of this code
13531 
// Safepoint poll: load from the polling page; the VM makes the page
// unreadable to trap threads at a safepoint.
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    // Relocated as poll_type so the VM can identify the poll site.
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
13545 
13546 
13547 // ============================================================================
13548 // Procedure Call/Return Instructions
13549 
13550 // Call Java Static Instruction
13551 
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  // Excludes method-handle invokes, which are matched by
  // CallStaticJavaDirectHandle instead.
  predicate(!((CallStaticJavaNode*)n)->is_method_handle_invoke());

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_static_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13569 
13570 // TO HERE
13571 
13572 // Call Java Static Instruction (method handle version)
13573 
instruct CallStaticJavaDirectHandle(method meth, iRegP_FP reg_mh_save)
%{
  match(CallStaticJava);

  effect(USE meth);

  // Complement of CallStaticJavaDirect's predicate: matches only
  // method-handle invokes.
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// (methodhandle) ==> " %}

  ins_encode( aarch64_enc_java_handle_call(meth),
              aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13591 
13592 // Call Java Dynamic Instruction
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode( aarch64_enc_java_dynamic_call(meth),
               aarch64_enc_call_epilog );

  ins_pipe(pipe_class_call);
%}
13608 
13609 // Call Runtime Instruction
13610 
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  // Java-to-runtime transition; no call epilog, unlike the Java calls.
  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13625 
13626 // Call Runtime Instruction
13627 
// Call Runtime Leaf Instruction: shares the java_to_runtime encoding
// with CallRuntimeDirect; only the matched ideal node differs.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13642 
13643 // Call Runtime Instruction
13644 
// Call Runtime Leaf (no FP arguments/results) Instruction; same
// encoding as CallLeafDirect, matching the CallLeafNoFP ideal node.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
13659 
13660 // Tail Call; Jump from runtime stub to Java code.
13661 // Also known as an 'interprocedural jump'.
13662 // Target of jump will eventually return to caller.
13663 // TailJump below removes the return address.
instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(CALL_COST);

  // Indirect branch; $method_oop is carried in the inline-cache register
  // for the callee, not read by the branch itself.
  format %{ "br $jump_target\t# $method_oop holds method oop" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
13676 
instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  // Indirect branch; the exception oop is pinned to r0 by the operand.
  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
13689 
13690 // Create exception oop: created by stack-crawling runtime code.
13691 // Created exception is now available to this handler, and is setup
13692 // just prior to jumping to this handler. No code emitted.
13693 // TODO check
13694 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  // Zero-size: this only tells the register allocator the oop is in r0.
  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
13707 
13708 // Rethrow exception: The exception oop will come in the first
13709 // argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  // Jump (not call) to the shared rethrow stub.
  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
13720 
13721 
13722 // Return Instruction
13723 // epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
  match(Return);

  // lr already holds the return address (loaded by the epilog).
  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}
13734 
13735 // Die now.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    // TODO
    // implement proper trap call here
    // BRK #999 raises a debug exception if this path is ever executed.
    __ brk(999);
  %}

  ins_pipe(pipe_class_default);
%}
13750 
13751 // ============================================================================
// Partial Subtype Check
//
// Scan the sub-klass's secondary-supers (superklass) array for an
// instance of the superklass.  Set a hidden internal cache on a hit
// (cache is checked with exposed code in gen_subtype_check()).
// Return NZ for a miss or zero for a hit.  The encoding ALSO sets
// flags.
13758 
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL cr, KILL temp);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  // Operands are pinned to fixed registers (r4/r0/r2/r5) by the
  // operand classes; the encoding depends on that placement.
  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
13773 
// Variant used when the check result is only compared against zero:
// the flags carry the answer, so the result register need not be zeroed.
instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL temp, KILL result);

  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super == 0" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x0); // Don't zero result reg on hit

  ins_pipe(pipe_class_memory);
%}
13788 
// Intrinsic for String.compareTo: delegates to the macro-assembler's
// string_compare stub-like expansion. All inputs are consumed
// (USE_KILL) and fixed to specific registers by the operand classes.
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13803 
// Intrinsic for String.indexOf with a variable-length needle; the -1
// passed to string_indexof signals that cnt2 is not a compile-time
// constant (contrast string_indexof_con below).
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
       iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      -1, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13821 
// String.indexOf variant where the needle length is a small constant
// (immI_le_4): the constant is passed directly to string_indexof and
// the runtime count register is replaced with zr.
instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                 immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                 iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      icnt2, $result$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13841 
// Intrinsic for String.equals: delegates to the macro-assembler's
// string_equals expansion; $tmp is scratch and clobbered.
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
  ins_encode %{
    __ string_equals($str1$$Register, $str2$$Register,
                      $cnt$$Register, $result$$Register,
                      $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13856 
// Array equality (AryEq); delegates to the char_arrays_equals stub, so this
// rule handles char[] comparison.  result in r0; ary1/ary2/tmp clobbered.
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                      iRegP_R10 tmp, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

  format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
  ins_encode %{
    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
                          $result$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
13870 
// encode char[] to byte[] in ISO_8859_1
// Uses v0..v3 as vector scratch; src/dst/len are clobbered.
// result is set by the stub — presumably the number of characters
// successfully encoded (NOTE(review): confirm against the
// MacroAssembler::encode_iso_array contract).
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
                          vRegD_V2 Vtmp3, vRegD_V3 Vtmp4,
                          iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE_KILL len,
         KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr);

  format %{ "Encode array $src,$dst,$len -> $result" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
         $result$$Register, $Vtmp1$$FloatRegister,  $Vtmp2$$FloatRegister,
         $Vtmp3$$FloatRegister,  $Vtmp4$$FloatRegister);
  %}
  ins_pipe( pipe_class_memory );
%}
13889 
13890 // ============================================================================
13891 // This name is KNOWN by the ADLC and cannot be changed.
13892 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
13893 // for this guy.
// Materialize the current thread pointer.  The thread lives in a dedicated
// register (thread_RegP), so no instruction is emitted: size(0) and an
// empty encoding — the register allocator simply binds dst to that register.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
13908 
13909 // ====================VECTOR INSTRUCTIONS=====================================
13910 
13911 // Load vector (32 bits)
// 32-bit vector load via scalar ldrs into the low word of a D register.
instruct loadV4(vecD dst, vmem4 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_ldrvS(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
13921 
13922 // Load vector (64 bits)
// 64-bit vector load (full D register) via ldrd.
instruct loadV8(vecD dst, vmem8 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_ldrvD(dst, mem) );
  ins_pipe(vload_reg_mem64);
%}
13932 
13933 // Load Vector (128 bits)
// 128-bit vector load (full Q register) via ldrq.
instruct loadV16(vecX dst, vmem16 mem)
%{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(4 * INSN_COST);
  format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_ldrvQ(dst, mem) );
  ins_pipe(vload_reg_mem128);
%}
13943 
13944 // Store Vector (32 bits)
// 32-bit vector store (low word of a D register) via strs.
instruct storeV4(vecD src, vmem4 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strs   $mem,$src\t# vector (32 bits)" %}
  ins_encode( aarch64_enc_strvS(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
13954 
13955 // Store Vector (64 bits)
// 64-bit vector store (full D register) via strd.
instruct storeV8(vecD src, vmem8 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strd   $mem,$src\t# vector (64 bits)" %}
  ins_encode( aarch64_enc_strvD(src, mem) );
  ins_pipe(vstore_reg_mem64);
%}
13965 
13966 // Store Vector (128 bits)
// 128-bit vector store (full Q register) via strq.
instruct storeV16(vecX src, vmem16 mem)
%{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(4 * INSN_COST);
  format %{ "strq   $mem,$src\t# vector (128 bits)" %}
  ins_encode( aarch64_enc_strvQ(src, mem) );
  ins_pipe(vstore_reg_mem128);
%}
13976 
// Broadcast a GPR byte into 8 byte lanes of a D register (also serves
// 4-lane vectors — the high lanes are simply unused).
instruct replicate8B(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
13989 
// Broadcast a GPR byte into all 16 byte lanes of a Q register.
instruct replicate16B(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14001 
// Broadcast an immediate byte into 8 (or 4) byte lanes via movi.
instruct replicate8B_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8B)" %}
  ins_encode %{
    // Only the low byte of the constant is meaningful for byte lanes.
    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
14014 
// Broadcast an immediate byte into all 16 byte lanes via movi.
instruct replicate16B_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(16B)" %}
  ins_encode %{
    // Only the low byte of the constant is meaningful for byte lanes.
    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14026 
// Broadcast a GPR short into 4 (or 2) halfword lanes of a D register.
instruct replicate4S(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
14039 
// Broadcast a GPR short into all 8 halfword lanes of a Q register.
instruct replicate8S(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (8S)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14051 
// Broadcast an immediate short into 4 (or 2) halfword lanes via movi.
instruct replicate4S_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4H)" %}
  ins_encode %{
    // Only the low 16 bits of the constant are meaningful for halfword lanes.
    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
14064 
// Broadcast an immediate short into all 8 halfword lanes via movi.
instruct replicate8S_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(8H)" %}
  ins_encode %{
    // Only the low 16 bits of the constant are meaningful for halfword lanes.
    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14076 
// Broadcast a GPR int into 2 word lanes of a D register.
instruct replicate2I(vecD dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
14088 
// Broadcast a GPR int into all 4 word lanes of a Q register.
instruct replicate4I(vecX dst, iRegIorL2I src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4I)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14100 
// Broadcast an immediate int into 2 word lanes via movi.
instruct replicate2I_imm(vecD dst, immI con)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(2I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm64);
%}
14112 
// Broadcast an immediate int into all 4 word lanes via movi.
instruct replicate4I_imm(vecX dst, immI con)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $con\t# vector(4I)" %}
  ins_encode %{
    __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant);
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14124 
// Broadcast a GPR long into both doubleword lanes of a Q register.
instruct replicate2L(vecX dst, iRegL src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2L)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14136 
// Zero a full Q register by xor-ing it with itself (eor dst,dst,dst),
// which avoids materializing the constant.  The immI0 operand restricts
// this rule to the all-zeros case.
// NOTE(review): this matches ReplicateI (not ReplicateL) and the format
// comment says "4I" although the rule is named 2L — presumably the ideal
// graph canonicalizes a 128-bit zero this way; confirm before changing.
instruct replicate2L_zero(vecX dst, immI0 zero)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  ins_cost(INSN_COST);
  format %{ "movi  $dst, $zero\t# vector(4I)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($dst$$reg),
           as_FloatRegister($dst$$reg));
  %}
  ins_pipe(vmovi_reg_imm128);
%}
14150 
// Broadcast a float (from an FP register) into 2 word lanes of a D register.
instruct replicate2F(vecD dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg64);
%}
14163 
// Broadcast a float (from an FP register) into all 4 word lanes of a Q register.
instruct replicate4F(vecX dst, vRegF src)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (4F)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_freg128);
%}
14176 
// Broadcast a double (from an FP register) into both doubleword lanes.
instruct replicate2D(vecX dst, vRegD src)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  ins_cost(INSN_COST);
  format %{ "dup  $dst, $src\t# vector (2D)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg));
  %}
  ins_pipe(vdup_reg_dreg128);
%}
14189 
14190 // ====================VECTOR ARITHMETIC=======================================
14191 
14192 // --------------------------------- ADD --------------------------------------
14193 
// Vector integer add, 8 (or 4) byte lanes in a D register.
instruct vadd8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
14208 
// Vector integer add, 16 byte lanes in a Q register.
instruct vadd16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
14222 
// Vector integer add, 4 (or 2) halfword lanes in a D register.
instruct vadd4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
14237 
// Vector integer add, 8 halfword lanes in a Q register.
instruct vadd8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
14251 
// Vector integer add, 2 word lanes in a D register.
instruct vadd2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
14265 
// Vector integer add, 4 word lanes in a Q register.
instruct vadd4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
14279 
// Vector integer add, 2 doubleword lanes in a Q register.
instruct vadd2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ addv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
14293 
// Vector float add, 2 single-precision lanes in a D register.
instruct vadd2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
14307 
// Vector float add, 4 single-precision lanes in a Q register.
instruct vadd4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
14321 
// Vector double add, 2 double-precision lanes in a Q register.
instruct vadd2D(vecX dst, vecX src1, vecX src2)
%{
  // Consistency fix: this rule was the only 2D arithmetic rule with no
  // length guard; give it the same predicate as vsub2D/vmul2D/vdiv2D.
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fadd  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fadd(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
14334 
14335 // --------------------------------- SUB --------------------------------------
14336 
// Vector integer subtract, 8 (or 4) byte lanes in a D register.
instruct vsub8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
14351 
// Vector integer subtract, 16 byte lanes in a Q register.
instruct vsub16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
14365 
// Vector integer subtract, 4 (or 2) halfword lanes in a D register.
instruct vsub4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
14380 
// Vector integer subtract, 8 halfword lanes in a Q register.
instruct vsub8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
14394 
// Vector integer subtract, 2 word lanes in a D register.
instruct vsub2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop64);
%}
14408 
// Vector integer subtract, 4 word lanes in a Q register.
instruct vsub4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
14422 
// Vector integer subtract, 2 doubleword lanes in a Q register.
instruct vsub2L(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  ins_cost(INSN_COST);
  format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
  ins_encode %{
    __ subv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop128);
%}
14436 
// Vector float subtract, 2 single-precision lanes in a D register.
instruct vsub2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp64);
%}
14450 
// Vector float subtract, 4 single-precision lanes in a Q register.
instruct vsub4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
14464 
// Vector double subtract, 2 double-precision lanes in a Q register.
instruct vsub2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fsub(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vdop_fp128);
%}
14478 
14479 // --------------------------------- MUL --------------------------------------
14480 
// Vector integer multiply, 4 (or 2) halfword lanes in a D register.
instruct vmul4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
14495 
// Vector integer multiply, 8 halfword lanes in a Q register.
instruct vmul8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (MulVS src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
14509 
// Vector integer multiply, 2 word lanes in a D register.
instruct vmul2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul64);
%}
14523 
// Vector integer multiply, 4 word lanes in a Q register.
instruct vmul4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVI src1 src2));
  ins_cost(INSN_COST);
  format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mulv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmul128);
%}
14537 
// Vector float multiply, 2 single-precision lanes in a D register.
instruct vmul2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
14551 
// Vector float multiply, 4 single-precision lanes in a Q register.
instruct vmul4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (MulVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
14565 
// Vector double multiply, 2 double-precision lanes in a Q register.
instruct vmul2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fmul(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
14579 
14580 // --------------------------------- MLA --------------------------------------
14581 
// Fused multiply-accumulate: dst += src1 * src2, 4 (or 2) halfword lanes.
// Note dst appears on both sides of the match (accumulator input).
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
14596 
// Fused multiply-accumulate: dst += src1 * src2, 8 halfword lanes.
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
14610 
// Fused multiply-accumulate: dst += src1 * src2, 2 word lanes.
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
14624 
// Fused multiply-accumulate: dst += src1 * src2, 4 word lanes.
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlav  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlav(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
14638 
14639 // --------------------------------- MLS --------------------------------------
14640 
// Fused multiply-subtract: dst -= src1 * src2, 4 (or 2) halfword lanes.
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
14655 
// Fused multiply-subtract: dst -= src1 * src2, 8 halfword lanes.
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst (MulVS src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (8H)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
14669 
// Fused multiply-subtract: dst -= src1 * src2, 2 word lanes.
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla64);
%}
14683 
// Fused multiply-subtract: dst -= src1 * src2, 4 word lanes.
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst (MulVI src1 src2)));
  ins_cost(INSN_COST);
  format %{ "mlsv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ mlsv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmla128);
%}
14697 
14698 // --------------------------------- DIV --------------------------------------
14699 
// Vector float divide, 2 single-precision lanes in a D register.
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp64);
%}
14713 
// Vector float divide, 4 single-precision lanes in a Q register.
instruct vdiv4F(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (DivVF src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
14727 
// Vector double divide, 2 double-precision lanes in a Q register.
instruct vdiv2D(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (DivVD src1 src2));
  ins_cost(INSN_COST);
  format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
  ins_encode %{
    __ fdiv(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vmuldiv_fp128);
%}
14741 
14742 // --------------------------------- AND --------------------------------------
14743 
// Bitwise AND of two 64-bit (or 32-bit) vectors; element type is
// irrelevant for logical ops, so the predicate keys on length_in_bytes.
instruct vand8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
14758 
// Bitwise AND of two 128-bit vectors.
instruct vand16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  ins_cost(INSN_COST);
  format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ andr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
14772 
14773 // --------------------------------- OR ---------------------------------------
14774 
// Bitwise OR of two 64-bit (or 32-bit) vectors.
instruct vor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  // Fixed: format previously printed "and" (copy-paste from vand8B)
  // although the encoding emits orr; vor16B already printed "orr".
  format %{ "orr  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
14789 
// Bitwise OR of 128-bit vectors; emits SIMD orr with the 16B arrangement.
instruct vor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  ins_cost(INSN_COST);
  format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ orr(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
14803 
14804 // --------------------------------- XOR --------------------------------------
14805 
// Bitwise XOR of 64-bit (or smaller) vectors; the AArch64 mnemonic is eor
// (the "xor" in the format echoes the XorV ideal node name).
instruct vxor8B(vecD dst, vecD src1, vecD src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 4 ||
            n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical64);
%}
14820 
// Bitwise XOR of 128-bit vectors; emits SIMD eor with the 16B arrangement.
instruct vxor16B(vecX dst, vecX src1, vecX src2)
%{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  ins_cost(INSN_COST);
  format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
  ins_encode %{
    __ eor(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src1$$reg),
            as_FloatRegister($src2$$reg));
  %}
  ins_pipe(vlogical128);
%}
14834 
14835 // ------------------------------ Shift ---------------------------------------
// Materialize a scalar shift count as a vector: broadcast the GP register
// into every byte lane of a 64-bit vector (used by the variable-shift rules).
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (8B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg64);
%}
14846 
// Materialize a scalar shift count as a vector: broadcast the GP register
// into every byte lane of a 128-bit vector.
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "dup  $dst, $cnt\t# shift count vector (16B)" %}
  ins_encode %{
    __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
  %}
  ins_pipe(vdup_reg_reg128);
%}
14857 
// Left shift of byte lanes by a per-lane (vector) shift count, 64-bit form.
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
14871 
// Left shift of byte lanes by a per-lane (vector) shift count, 128-bit form.
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (16B)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
14884 
14885 // Right shifts with vector shift count on aarch64 SIMD are implemented
14886 // as left shift by negative shift count.
14887 // There are two cases for vector shift count.
14888 //
14889 // Case 1: The vector shift count is from replication.
14890 //        |            |
14891 //    LoadVector  RShiftCntV
14892 //        |       /
14893 //     RShiftVI
// Note: In the inner loop, multiple neg instructions are used; they can be
// moved to the outer loop and merged into one neg instruction.
14896 //
14897 // Case 2: The vector shift count is from loading.
14898 // This case isn't supported by middle-end now. But it's supported by
14899 // panama/vectorIntrinsics(JEP 338: Vector API).
14900 //        |            |
14901 //    LoadVector  LoadVector
14902 //        |       /
14903 //     RShiftVI
14904 //
14905 
// Arithmetic right shift of byte lanes by a vector count, 64-bit form.
// SIMD only has a left shift by a (possibly negative) vector count, so the
// count is negated into tmp and sshl is used (see the comment block above).
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
14923 
// Arithmetic right shift of byte lanes by a vector count, 128-bit form;
// negate-count-then-sshl, as in vsra8B.
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
14940 
// Logical (unsigned) right shift of byte lanes by a vector count, 64-bit
// form; negate-count-then-ushl.
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
14958 
// Logical (unsigned) right shift of byte lanes by a vector count, 128-bit
// form; negate-count-then-ushl.
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (16B)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T16B,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
14975 
// Left shift of byte lanes by an immediate, 64-bit form. The count is masked
// to 0..31 (Java int-shift semantics); a byte shifted left by >= 8 is zero,
// which shl's immediate field cannot encode, so zero dst with eor src,src.
instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
14995 
// Left shift of byte lanes by an immediate, 128-bit form; same
// zero-for-oversized-shift handling as vsll8B_imm.
instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
15014 
// Arithmetic right shift of byte lanes by an immediate, 64-bit form.
// Shifts >= 8 saturate to 7 (replicating the sign bit). The count is then
// passed as -sh & 7 — NOTE(review): this matches the assembler's negated
// right-shift immediate convention; confirm against assembler_aarch64.
instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
15030 
// Arithmetic right shift of byte lanes by an immediate, 128-bit form;
// same clamp-to-7 and negated-count encoding as vsra8B_imm.
instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (RShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) sh = 7;
    sh = -sh & 7;
    __ sshr(as_FloatRegister($dst$$reg), __ T16B,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
15045 
// Logical right shift of byte lanes by an immediate, 64-bit form. A byte
// logically shifted right by >= 8 is zero, so emit eor src,src; otherwise
// the count is passed negated (-sh & 7) per the assembler's convention.
instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 4 ||
            n->as_Vector()->length() == 8);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift64_imm);
%}
15065 
// Logical right shift of byte lanes by an immediate, 128-bit form; same
// zero-for-oversized-shift and negated-count handling as vsrl8B_imm.
instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (URShiftVB src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 8) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg), -sh & 7);
    }
  %}
  ins_pipe(vshift128_imm);
%}
15084 
// Left shift of 16-bit (short) lanes by a vector count, 64-bit form
// (covers 2- and 4-lane vectors).
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
15098 
// Left shift of 16-bit (short) lanes by a vector count, 128-bit form.
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (8H)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
15111 
// Arithmetic right shift of short lanes by a vector count, 64-bit form;
// negate-count-then-sshl (negr uses T8B: negation is per byte of the count
// vector, whose lanes hold the replicated shift count).
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
15129 
// Arithmetic right shift of short lanes by a vector count, 128-bit form;
// negate-count-then-sshl.
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
15146 
// Logical right shift of short lanes by a vector count, 64-bit form;
// negate-count-then-ushl.
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
15164 
// Logical right shift of short lanes by a vector count, 128-bit form;
// negate-count-then-ushl.
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (8H)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T8H,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
15181 
// Left shift of short lanes by an immediate, 64-bit form. A short shifted
// left by >= 16 is zero (not encodable in shl), so zero dst with eor.
instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift64_imm);
%}
15201 
// Left shift of short lanes by an immediate, 128-bit form; same
// zero-for-oversized-shift handling as vsll4S_imm.
instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ shl(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), sh);
    }
  %}
  ins_pipe(vshift128_imm);
%}
15220 
// Arithmetic right shift of short lanes by an immediate, 64-bit form.
// Shifts >= 16 saturate to 15 (sign replication); count passed as -sh & 15
// per the assembler's negated right-shift immediate convention.
instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift64_imm);
%}
15236 
// Arithmetic right shift of short lanes by an immediate, 128-bit form;
// same clamp-to-15 and negated-count encoding as vsra4S_imm.
instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) sh = 15;
    sh = -sh & 15;
    __ sshr(as_FloatRegister($dst$$reg), __ T8H,
           as_FloatRegister($src$$reg), sh);
  %}
  ins_pipe(vshift128_imm);
%}
15251 
// Logical right shift of short lanes by an immediate, 64-bit form. Shifts
// >= 16 produce zero (eor src,src); otherwise the count is passed negated.
instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2 ||
            n->as_Vector()->length() == 4);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T8B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift64_imm);
%}
15271 
// Logical right shift of short lanes by an immediate, 128-bit form; same
// zero-for-oversized-shift and negated-count handling as vsrl4S_imm.
instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (URShiftVS src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
  ins_encode %{
    int sh = (int)$shift$$constant & 31;
    if (sh >= 16) {
      __ eor(as_FloatRegister($dst$$reg), __ T16B,
             as_FloatRegister($src$$reg),
             as_FloatRegister($src$$reg));
    } else {
      __ ushr(as_FloatRegister($dst$$reg), __ T8H,
             as_FloatRegister($src$$reg), -sh & 15);
    }
  %}
  ins_pipe(vshift128_imm);
%}
15290 
// Left shift of 32-bit (int) lanes by a vector count, 64-bit (2S) form.
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift64);
%}
15303 
// Left shift of 32-bit (int) lanes by a vector count, 128-bit (4S) form.
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (4S)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
15316 
// Arithmetic right shift of int lanes by a vector count, 64-bit form;
// negate-count-then-sshl.
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
15333 
// Arithmetic right shift of int lanes by a vector count, 128-bit form;
// negate-count-then-sshl.
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
15350 
// Logical right shift of int lanes by a vector count, 64-bit form;
// negate-count-then-ushl.
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T8B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift64);
%}
15367 
// Logical right shift of int lanes by a vector count, 128-bit form;
// negate-count-then-ushl.
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (4S)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
15384 
// Left shift of int lanes by an immediate, 64-bit form. The count is masked
// to 0..31 (Java int-shift range), which always fits shl's 32-bit encoding,
// so no oversized-shift special case is needed.
instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
15397 
// Left shift of int lanes by an immediate, 128-bit form.
instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T4S,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
15410 
// Arithmetic right shift of int lanes by an immediate, 64-bit form. The
// count is passed negated and masked (-c & 31) per the assembler's
// right-shift immediate convention — NOTE(review): confirm in assembler_aarch64.
instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
15423 
// Arithmetic right shift of int lanes by an immediate, 128-bit form;
// negated-count encoding as in vsra2I_imm.
instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
15436 
// Logical right shift of int lanes by an immediate, 64-bit form;
// negated-count encoding as in vsra2I_imm.
instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift64_imm);
%}
15449 
// Logical right shift of int lanes by an immediate, 128-bit form;
// negated-count encoding as in vsra2I_imm.
instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T4S,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 31);
  %}
  ins_pipe(vshift128_imm);
%}
15462 
// Left shift of 64-bit (long) lanes by a vector count, 128-bit (2D) form.
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshl  $dst,$src,$shift\t# vector (2D)" %}
  ins_encode %{
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($shift$$reg));
  %}
  ins_pipe(vshift128);
%}
15475 
// Arithmetic right shift of long lanes by a vector count;
// negate-count-then-sshl.
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "sshl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ sshl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
15492 
// Logical right shift of long lanes by a vector count;
// negate-count-then-ushl.
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  effect(TEMP tmp);
  format %{ "negr  $tmp,$shift\t"
            "ushl  $dst,$src,$tmp\t# vector (2D)" %}
  ins_encode %{
    __ negr(as_FloatRegister($tmp$$reg), __ T16B,
            as_FloatRegister($shift$$reg));
    __ ushl(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            as_FloatRegister($tmp$$reg));
  %}
  ins_pipe(vshift128);
%}
15509 
// Left shift of long lanes by an immediate; count masked to 0..63
// (Java long-shift range), which always fits shl's 64-bit encoding.
instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ shl(as_FloatRegister($dst$$reg), __ T2D,
           as_FloatRegister($src$$reg),
           (int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
15522 
// Arithmetic right shift of long lanes by an immediate; count passed
// negated and masked (-c & 63) per the assembler's right-shift convention.
instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ sshr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
15535 
// Logical right shift of long lanes by an immediate; negated-count encoding
// as in vsra2L_imm.
instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  ins_cost(INSN_COST);
  format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
  ins_encode %{
    __ ushr(as_FloatRegister($dst$$reg), __ T2D,
            as_FloatRegister($src$$reg),
            -(int)$shift$$constant & 63);
  %}
  ins_pipe(vshift128_imm);
%}
15548 
15549 //----------PEEPHOLE RULES-----------------------------------------------------
15550 // These must follow all instruction definitions as they use the names
15551 // defined in the instructions definitions.
15552 //
15553 // peepmatch ( root_instr_name [preceding_instruction]* );
15554 //
15555 // peepconstraint %{
15556 // (instruction_number.operand_name relational_op instruction_number.operand_name
15557 //  [, ...] );
15558 // // instruction numbers are zero-based using left to right order in peepmatch
15559 //
15560 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
15561 // // provide an instruction_number.operand_name for each operand that appears
15562 // // in the replacement instruction's match rule
15563 //
15564 // ---------VM FLAGS---------------------------------------------------------
15565 //
15566 // All peephole optimizations can be turned off using -XX:-OptoPeephole
15567 //
15568 // Each peephole rule is given an identifying number starting with zero and
15569 // increasing by one in the order seen by the parser.  An individual peephole
15570 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
15571 // on the command-line.
15572 //
15573 // ---------CURRENT LIMITATIONS----------------------------------------------
15574 //
15575 // Only match adjacent instructions in same basic block
15576 // Only equality constraints
15577 // Only constraints between operands, not (0.dest_reg == RAX_enc)
15578 // Only one replacement instruction
15579 //
15580 // ---------EXAMPLE----------------------------------------------------------
15581 //
15582 // // pertinent parts of existing instructions in architecture description
15583 // instruct movI(iRegINoSp dst, iRegI src)
15584 // %{
15585 //   match(Set dst (CopyI src));
15586 // %}
15587 //
15588 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
15589 // %{
15590 //   match(Set dst (AddI dst src));
15591 //   effect(KILL cr);
15592 // %}
15593 //
15594 // // Change (inc mov) to lea
15595 // peephole %{
//   // increment preceded by register-register move
15597 //   peepmatch ( incI_iReg movI );
15598 //   // require that the destination register of the increment
15599 //   // match the destination register of the move
15600 //   peepconstraint ( 0.dst == 1.dst );
15601 //   // construct a replacement instruction that sets
15602 //   // the destination to ( move's source register + one )
15603 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
15604 // %}
15605 //
15606 
15607 // Implementation no longer uses movX instructions since
15608 // machine-independent system no longer uses CopyX nodes.
15609 //
15610 // peephole
15611 // %{
15612 //   peepmatch (incI_iReg movI);
15613 //   peepconstraint (0.dst == 1.dst);
15614 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15615 // %}
15616 
15617 // peephole
15618 // %{
15619 //   peepmatch (decI_iReg movI);
15620 //   peepconstraint (0.dst == 1.dst);
15621 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15622 // %}
15623 
15624 // peephole
15625 // %{
15626 //   peepmatch (addI_iReg_imm movI);
15627 //   peepconstraint (0.dst == 1.dst);
15628 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
15629 // %}
15630 
15631 // peephole
15632 // %{
15633 //   peepmatch (incL_iReg movL);
15634 //   peepconstraint (0.dst == 1.dst);
15635 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15636 // %}
15637 
15638 // peephole
15639 // %{
15640 //   peepmatch (decL_iReg movL);
15641 //   peepconstraint (0.dst == 1.dst);
15642 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15643 // %}
15644 
15645 // peephole
15646 // %{
15647 //   peepmatch (addL_iReg_imm movL);
15648 //   peepconstraint (0.dst == 1.dst);
15649 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
15650 // %}
15651 
15652 // peephole
15653 // %{
15654 //   peepmatch (addP_iReg_imm movP);
15655 //   peepconstraint (0.dst == 1.dst);
15656 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
15657 // %}
15658 
15659 // // Change load of spilled value to only a spill
15660 // instruct storeI(memory mem, iRegI src)
15661 // %{
15662 //   match(Set mem (StoreI mem src));
15663 // %}
15664 //
15665 // instruct loadI(iRegINoSp dst, memory mem)
15666 // %{
15667 //   match(Set dst (LoadI mem));
15668 // %}
15669 //
15670 
15671 //----------SMARTSPILL RULES---------------------------------------------------
15672 // These must follow all instruction definitions as they use the names
15673 // defined in the instructions definitions.
15674 
15675 // Local Variables:
15676 // mode: c++
15677 // End: